2024-12-09 15:31:15,195 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-09 15:31:15,211 main DEBUG Took 0.013520 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-09 15:31:15,211 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-09 15:31:15,212 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-09 15:31:15,213 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-09 15:31:15,214 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,224 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-09 15:31:15,241 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,243 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,244 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,244 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,245 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,245 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,246 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,247 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,247 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,248 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,249 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,249 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,249 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,250 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,250 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,251 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,251 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,252 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,252 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,253 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,253 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,254 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,254 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,254 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 15:31:15,255 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,255 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-09 15:31:15,257 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 15:31:15,259 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-09 15:31:15,260 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-09 15:31:15,261 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-09 15:31:15,262 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-09 15:31:15,262 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-09 15:31:15,270 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-09 15:31:15,273 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-09 15:31:15,274 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-09 15:31:15,275 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-09 15:31:15,275 main DEBUG createAppenders(={Console})
2024-12-09 15:31:15,276 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-12-09 15:31:15,277 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-09 15:31:15,277 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-12-09 15:31:15,278 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-09 15:31:15,278 main DEBUG OutputStream closed
2024-12-09 15:31:15,279 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-09 15:31:15,279 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-09 15:31:15,279 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-12-09 15:31:15,365 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-09 15:31:15,368 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-09 15:31:15,369 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-09 15:31:15,370 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-09 15:31:15,371 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-09 15:31:15,371 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-09 15:31:15,371 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-09 15:31:15,372 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-09 15:31:15,372 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-09 15:31:15,373 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-09 15:31:15,373 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-09 15:31:15,374 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-09 15:31:15,374 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-09 15:31:15,374 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-09 15:31:15,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-09 15:31:15,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-09 15:31:15,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-09 15:31:15,376 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-09 15:31:15,379 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-09 15:31:15,380 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-09 15:31:15,380 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-09 15:31:15,381 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-09T15:31:15,792 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab
2024-12-09 15:31:15,797 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-09 15:31:15,799 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
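[Annotation] The plugin-builder trace above is Log4j replaying the log4j2.properties packaged in hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar: per-package levels (for example org.apache.zookeeper=ERROR, org.apache.hadoop=WARN, org.apache.hadoop.hbase=DEBUG), a root logger at INFO routed to the Console appender, and HBase's HBaseTestAppender writing to SYSTEM_ERR with the pattern shown. As a rough sketch only, the same shape could be reproduced with Log4j's programmatic ConfigurationBuilder API; the stock Console appender below stands in for HBaseTestAppender (which is HBase-internal), and only a few of the logger levels are carried over:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.appender.ConsoleAppender;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public class TestLogConfigSketch {
      public static void main(String[] args) {
        ConfigurationBuilder<BuiltConfiguration> b = ConfigurationBuilderFactory.newConfigurationBuilder();
        // Stand-in for HBaseTestAppender(target="SYSTEM_ERR", maxSize="1G") seen in the trace.
        AppenderComponentBuilder console = b.newAppender("Console", "Console")
            .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
            .add(b.newLayout("PatternLayout")
                .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
        b.add(console);
        // A few of the per-package levels from the LoggerConfig$Builder lines above.
        b.add(b.newLogger("org.apache.zookeeper", Level.ERROR));
        b.add(b.newLogger("org.apache.hadoop", Level.WARN));
        b.add(b.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
        // Root logger: level INFO with appender ref "Console" (levelAndRefs="INFO,Console" above).
        b.add(b.newRootLogger(Level.INFO).add(b.newAppenderRef("Console")));
        Configurator.initialize(b.build());
      }
    }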
2024-12-09T15:31:15,823 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-12-09T15:31:15,911 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=666, ProcessCount=11, AvailableMemoryMB=7033
2024-12-09T15:31:15,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T15:31:15,937 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee, deleteOnExit=true
2024-12-09T15:31:15,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T15:31:15,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/test.cache.data in system properties and HBase conf
2024-12-09T15:31:15,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T15:31:15,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.log.dir in system properties and HBase conf
2024-12-09T15:31:15,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T15:31:15,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T15:31:15,943 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T15:31:16,078 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-09T15:31:16,247 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T15:31:16,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-09T15:31:16,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-09T15:31:16,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-09T15:31:16,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T15:31:16,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-09T15:31:16,254 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-09T15:31:16,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T15:31:16,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T15:31:16,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-09T15:31:16,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/nfs.dump.dir in system properties and HBase conf
2024-12-09T15:31:16,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/java.io.tmpdir in system properties and HBase conf
2024-12-09T15:31:16,258 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T15:31:16,258 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-09T15:31:16,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-09T15:31:16,915 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T15:31:17,435 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-09T15:31:17,562 INFO [Time-limited test {}] log.Log(170): Logging initialized @3212ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-09T15:31:17,663 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:31:17,760 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:31:17,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:31:17,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:31:17,801 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T15:31:17,821 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:31:17,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:31:17,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:31:18,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/java.io.tmpdir/jetty-localhost-46553-hadoop-hdfs-3_4_1-tests_jar-_-any-9232992401213455977/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T15:31:18,159 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:46553}
2024-12-09T15:31:18,159 INFO [Time-limited test {}] server.Server(415): Started @3810ms
2024-12-09T15:31:18,199 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T15:31:18,852 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:31:18,869 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:31:18,875 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:31:18,875 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:31:18,876 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:31:18,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:31:18,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:31:19,042 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/java.io.tmpdir/jetty-localhost-46525-hadoop-hdfs-3_4_1-tests_jar-_-any-2712132362180865423/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:31:19,043 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:46525}
2024-12-09T15:31:19,044 INFO [Time-limited test {}] server.Server(415): Started @4695ms
2024-12-09T15:31:19,116 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T15:31:19,337 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:31:19,345 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:31:19,356 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:31:19,356 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:31:19,357 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:31:19,360 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:31:19,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:31:19,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/java.io.tmpdir/jetty-localhost-32775-hadoop-hdfs-3_4_1-tests_jar-_-any-5597815700124668956/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:31:19,542 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:32775}
2024-12-09T15:31:19,543 INFO [Time-limited test {}] server.Server(415): Started @5194ms
2024-12-09T15:31:19,547 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
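[Annotation] Everything from STARTING DFS onward is driven by the StartMiniClusterOption logged at 15:31:15,916: one master, one region server, two datanodes, one ZooKeeper server. A minimal, hypothetical harness requesting the same topology through the HBase 3 test utility might look like the sketch below (class name and test body are illustrative only):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2,
        // numZkServers=1, createRootDir=false, createWALDir=false} from the log above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .createRootDir(false)
            .createWALDir(false)
            .build();
        util.startMiniCluster(option); // kicks off the DFS, ZooKeeper and master startup traced below
        try {
          // test body would run against util.getConnection() here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }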
2024-12-09T15:31:19,825 WARN [Thread-94 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/data/data3/current/BP-1717592979-172.17.0.3-1733758277046/current, will proceed with Du for space computation calculation,
2024-12-09T15:31:19,827 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/data/data4/current/BP-1717592979-172.17.0.3-1733758277046/current, will proceed with Du for space computation calculation,
2024-12-09T15:31:19,829 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/data/data1/current/BP-1717592979-172.17.0.3-1733758277046/current, will proceed with Du for space computation calculation,
2024-12-09T15:31:19,828 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/data/data2/current/BP-1717592979-172.17.0.3-1733758277046/current, will proceed with Du for space computation calculation,
2024-12-09T15:31:19,987 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T15:31:20,015 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T15:31:20,161 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xada97b21bcc9720 with lease ID 0xf24d968a3978e6f6: Processing first storage report for DS-d1553715-9cdc-48aa-b88b-087355fb011a from datanode DatanodeRegistration(127.0.0.1:39699, datanodeUuid=aaa753a5-4082-463f-a236-20f2a4189eb7, infoPort=40081, infoSecurePort=0, ipcPort=41745, storageInfo=lv=-57;cid=testClusterID;nsid=1089903234;c=1733758277046)
2024-12-09T15:31:20,163 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xada97b21bcc9720 with lease ID 0xf24d968a3978e6f6: from storage DS-d1553715-9cdc-48aa-b88b-087355fb011a node DatanodeRegistration(127.0.0.1:39699, datanodeUuid=aaa753a5-4082-463f-a236-20f2a4189eb7, infoPort=40081, infoSecurePort=0, ipcPort=41745, storageInfo=lv=-57;cid=testClusterID;nsid=1089903234;c=1733758277046), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-09T15:31:20,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x438bc7e34d7a7b17 with lease ID 0xf24d968a3978e6f7: Processing first storage report for DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd from datanode DatanodeRegistration(127.0.0.1:41631, datanodeUuid=5682fbff-95ff-4421-895e-e0029a532786, infoPort=42231, infoSecurePort=0, ipcPort=36385, storageInfo=lv=-57;cid=testClusterID;nsid=1089903234;c=1733758277046)
2024-12-09T15:31:20,164 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x438bc7e34d7a7b17 with lease ID 0xf24d968a3978e6f7: from storage DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd node DatanodeRegistration(127.0.0.1:41631, datanodeUuid=5682fbff-95ff-4421-895e-e0029a532786, infoPort=42231, infoSecurePort=0, ipcPort=36385, storageInfo=lv=-57;cid=testClusterID;nsid=1089903234;c=1733758277046), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:31:20,165 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x438bc7e34d7a7b17 with lease ID 0xf24d968a3978e6f7: Processing first storage report for DS-4f52d266-21fa-4670-9be9-5ad45b2d3acd from datanode DatanodeRegistration(127.0.0.1:41631, datanodeUuid=5682fbff-95ff-4421-895e-e0029a532786, infoPort=42231, infoSecurePort=0, ipcPort=36385, storageInfo=lv=-57;cid=testClusterID;nsid=1089903234;c=1733758277046)
2024-12-09T15:31:20,165 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x438bc7e34d7a7b17 with lease ID 0xf24d968a3978e6f7: from storage DS-4f52d266-21fa-4670-9be9-5ad45b2d3acd node DatanodeRegistration(127.0.0.1:41631, datanodeUuid=5682fbff-95ff-4421-895e-e0029a532786, infoPort=42231, infoSecurePort=0, ipcPort=36385, storageInfo=lv=-57;cid=testClusterID;nsid=1089903234;c=1733758277046), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:31:20,165 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xada97b21bcc9720 with lease ID 0xf24d968a3978e6f6: Processing first storage report for DS-fcd6ffec-c2aa-4ebf-9927-51b58875430f from datanode DatanodeRegistration(127.0.0.1:39699, datanodeUuid=aaa753a5-4082-463f-a236-20f2a4189eb7, infoPort=40081, infoSecurePort=0, ipcPort=41745, storageInfo=lv=-57;cid=testClusterID;nsid=1089903234;c=1733758277046)
2024-12-09T15:31:20,166 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xada97b21bcc9720 with lease ID 0xf24d968a3978e6f6: from storage DS-fcd6ffec-c2aa-4ebf-9927-51b58875430f node DatanodeRegistration(127.0.0.1:39699, datanodeUuid=aaa753a5-4082-463f-a236-20f2a4189eb7, infoPort=40081, infoSecurePort=0, ipcPort=41745, storageInfo=lv=-57;cid=testClusterID;nsid=1089903234;c=1733758277046), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T15:31:20,208 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab
2024-12-09T15:31:20,369 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/zookeeper_0, clientPort=63849, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T15:31:20,384 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63849
2024-12-09T15:31:20,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:31:20,402 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:31:20,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741825_1001 (size=7)
2024-12-09T15:31:20,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741825_1001 (size=7)
2024-12-09T15:31:20,894 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648 with version=8
2024-12-09T15:31:20,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/hbase-staging
2024-12-09T15:31:21,029 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-09T15:31:21,370 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3651d8ead0f3:0 server-side Connection retries=45
2024-12-09T15:31:21,387 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:31:21,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T15:31:21,396 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T15:31:21,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:31:21,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T15:31:21,599 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T15:31:21,694 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-09T15:31:21,709 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-09T15:31:21,714 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T15:31:21,750 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 28702 (auto-detected)
2024-12-09T15:31:21,752 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected)
2024-12-09T15:31:21,781 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39115
2024-12-09T15:31:21,815 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39115 connecting to ZooKeeper ensemble=127.0.0.1:63849
2024-12-09T15:31:21,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:391150x0, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T15:31:21,872 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39115-0x1012bcf72b10000 connected
2024-12-09T15:31:21,961 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:31:21,965 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:31:21,985 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:31:21,991 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648, hbase.cluster.distributed=false
2024-12-09T15:31:22,023 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T15:31:22,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39115
2024-12-09T15:31:22,039 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39115
2024-12-09T15:31:22,042 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39115
2024-12-09T15:31:22,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39115
2024-12-09T15:31:22,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39115
2024-12-09T15:31:22,192 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3651d8ead0f3:0 server-side Connection retries=45
2024-12-09T15:31:22,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:31:22,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T15:31:22,195 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T15:31:22,195 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:31:22,195 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T15:31:22,198 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T15:31:22,201 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T15:31:22,203 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46789
2024-12-09T15:31:22,205 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46789 connecting to ZooKeeper ensemble=127.0.0.1:63849
2024-12-09T15:31:22,207 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:31:22,211 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:31:22,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467890x0, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T15:31:22,219 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:467890x0, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:31:22,219 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46789-0x1012bcf72b10001 connected
2024-12-09T15:31:22,224 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T15:31:22,240 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T15:31:22,251 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T15:31:22,262 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T15:31:22,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46789
2024-12-09T15:31:22,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46789
2024-12-09T15:31:22,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46789
2024-12-09T15:31:22,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46789
2024-12-09T15:31:22,272 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46789
2024-12-09T15:31:22,295 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3651d8ead0f3:39115
2024-12-09T15:31:22,297 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3651d8ead0f3,39115,1733758281102
2024-12-09T15:31:22,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:31:22,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:31:22,310 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3651d8ead0f3,39115,1733758281102
2024-12-09T15:31:22,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-09T15:31:22,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:31:22,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:31:22,348 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-09T15:31:22,350 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3651d8ead0f3,39115,1733758281102 from backup master directory
2024-12-09T15:31:22,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:31:22,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3651d8ead0f3,39115,1733758281102
2024-12-09T15:31:22,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:31:22,356 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-09T15:31:22,356 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3651d8ead0f3,39115,1733758281102
2024-12-09T15:31:22,360 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-09T15:31:22,362 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-09T15:31:22,448 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/hbase.id] with ID: fd131ba1-6913-45c9-a284-d504c9a579da
2024-12-09T15:31:22,448 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/.tmp/hbase.id
2024-12-09T15:31:22,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741826_1002 (size=42)
2024-12-09T15:31:22,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741826_1002 (size=42)
2024-12-09T15:31:22,475 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/.tmp/hbase.id]:[hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/hbase.id]
2024-12-09T15:31:22,533 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:31:22,539 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-09T15:31:22,571 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 29ms.
2024-12-09T15:31:22,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:31:22,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:31:22,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741827_1003 (size=196)
2024-12-09T15:31:22,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741827_1003 (size=196)
2024-12-09T15:31:23,025 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T15:31:23,028 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T15:31:23,036 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T15:31:23,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741828_1004 (size=1189)
2024-12-09T15:31:23,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741828_1004 (size=1189)
2024-12-09T15:31:23,126 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store
2024-12-09T15:31:23,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741829_1005 (size=34)
2024-12-09T15:31:23,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741829_1005 (size=34)
2024-12-09T15:31:23,195 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-09T15:31:23,200 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:31:23,202 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T15:31:23,202 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:31:23,203 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:31:23,204 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T15:31:23,205 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:31:23,205 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
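[Annotation] The 'master:store' descriptor printed above is the master's bootstrap local region, with info, proc, rs and state column families. For reference only, a hedged sketch of how the logged 'info' family settings map onto the public client API; the values are copied from the descriptor above and the class and variable names are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      // VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL',
      // IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)' -- as logged for the 'info' family.
      public static TableDescriptor build() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            .build();
      }
    }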
2024-12-09T15:31:23,207 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758283202; Disabling compacts and flushes for region at 1733758283202; Disabling writes for close at 1733758283205 (+3 ms); Writing region close event to WAL at 1733758283205; Closed at 1733758283205 2024-12-09T15:31:23,210 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/.initializing 2024-12-09T15:31:23,211 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/WALs/3651d8ead0f3,39115,1733758281102 2024-12-09T15:31:23,245 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C39115%2C1733758281102, suffix=, logDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/WALs/3651d8ead0f3,39115,1733758281102, archiveDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/oldWALs, maxLogs=10 2024-12-09T15:31:23,256 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C39115%2C1733758281102.1733758283250 2024-12-09T15:31:23,305 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/WALs/3651d8ead0f3,39115,1733758281102/3651d8ead0f3%2C39115%2C1733758281102.1733758283250 2024-12-09T15:31:23,328 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40081:40081),(127.0.0.1/127.0.0.1:42231:42231)] 2024-12-09T15:31:23,331 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:31:23,332 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:31:23,336 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,337 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,438 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T15:31:23,443 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:23,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:31:23,448 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,452 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T15:31:23,453 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:23,454 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:31:23,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,459 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T15:31:23,459 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:23,461 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:31:23,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T15:31:23,466 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:23,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:31:23,468 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,474 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,476 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,483 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,484 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,489 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor; using region.getMemStoreFlushHeapSize/# of families (32.0 M) instead. 2024-12-09T15:31:23,494 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:31:23,506 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:31:23,509 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicy super{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797095, jitterRate=0.013558819890022278}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T15:31:23,518 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733758283357; Initializing all the Stores at 1733758283361 (+4 ms); Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758283362 (+1 ms); Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758283363 (+1 ms); Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758283363; Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758283363; Cleaning up temporary data from old regions at 1733758283484 (+121 ms); Region opened successfully at 1733758283518 (+34 ms) 2024-12-09T15:31:23,523 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T15:31:23,580 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21ba0481, compressor=null, tcpKeepAlive=true, tcpNoDelay=true,
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:31:23,623 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T15:31:23,640 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T15:31:23,641 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T15:31:23,645 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T15:31:23,649 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 4 msec 2024-12-09T15:31:23,657 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-12-09T15:31:23,657 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T15:31:23,691 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T15:31:23,707 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T15:31:23,710 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T15:31:23,714 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T15:31:23,716 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T15:31:23,718 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T15:31:23,721 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T15:31:23,726 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T15:31:23,730 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T15:31:23,732 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T15:31:23,734 
DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T15:31:23,756 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T15:31:23,759 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T15:31:23,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:31:23,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:31:23,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:31:23,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:31:23,770 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3651d8ead0f3,39115,1733758281102, sessionid=0x1012bcf72b10000, setting cluster-up flag (Was=false) 2024-12-09T15:31:23,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:31:23,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:31:23,796 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T15:31:23,799 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,39115,1733758281102 2024-12-09T15:31:23,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:31:23,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:31:23,823 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T15:31:23,826 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,39115,1733758281102 2024-12-09T15:31:23,835 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T15:31:23,880 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(746): ClusterId : fd131ba1-6913-45c9-a284-d504c9a579da 2024-12-09T15:31:23,883 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T15:31:23,896 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T15:31:23,897 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T15:31:23,901 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T15:31:23,902 DEBUG [RS:0;3651d8ead0f3:46789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@643e3a3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:31:23,922 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3651d8ead0f3:46789 2024-12-09T15:31:23,927 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T15:31:23,927 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T15:31:23,927 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T15:31:23,928 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T15:31:23,931 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(2659): reportForDuty to master=3651d8ead0f3,39115,1733758281102 with port=46789, startcode=1733758282141 2024-12-09T15:31:23,941 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T15:31:23,944 DEBUG [RS:0;3651d8ead0f3:46789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T15:31:23,948 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
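The balancer values loaded above (slop=0.2, maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000) are driven by configuration. A sketch of setting them programmatically; the property keys below are the customary ones for these knobs and are an assumption to verify against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values copied from the log; keys are assumed, not taken from this run.
        conf.setFloat("hbase.regions.slop", 0.2f);
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
      }
    }

In practice these would live in hbase-site.xml on the master rather than be set in code.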
2024-12-09T15:31:23,955 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3651d8ead0f3,39115,1733758281102 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T15:31:23,965 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:31:23,966 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:31:23,966 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:31:23,966 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:31:23,966 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3651d8ead0f3:0, corePoolSize=10, maxPoolSize=10 2024-12-09T15:31:23,967 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:23,967 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:31:23,967 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:23,988 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:31:23,989 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T15:31:23,993 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733758313993 2024-12-09T15:31:23,995 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T15:31:23,996 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:23,996 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T15:31:23,997 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T15:31:24,002 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T15:31:24,007 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T15:31:24,008 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T15:31:24,009 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T15:31:24,011 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
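The hbase:meta descriptor written here can be read back from a running cluster with the public Admin API. A small sketch, assuming an hbase-site.xml for this cluster is on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class MetaDescriptorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Prints the same info/ns/rep_barrier/table families dumped above.
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          System.out.println(meta);
        }
      }
    }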
2024-12-09T15:31:24,016 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T15:31:24,018 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T15:31:24,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:31:24,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:31:24,019 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T15:31:24,021 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T15:31:24,022 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T15:31:24,023 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T15:31:24,024 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648 2024-12-09T15:31:24,024 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758284023,5,FailOnTimeoutGroup] 2024-12-09T15:31:24,026 DEBUG 
[master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758284024,5,FailOnTimeoutGroup] 2024-12-09T15:31:24,026 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,026 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T15:31:24,028 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,028 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:31:24,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:31:24,042 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39315, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T15:31:24,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:31:24,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:31:24,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:31:24,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:24,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:31:24,050 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39115 {}] master.ServerManager(363): Checking decommissioned status 
of RegionServer 3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:24,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:31:24,053 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39115 {}] master.ServerManager(517): Registering regionserver=3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:24,054 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:31:24,054 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:24,055 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:31:24,055 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:31:24,058 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:31:24,058 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:24,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:31:24,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:31:24,063 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:31:24,063 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:24,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:31:24,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:31:24,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740 2024-12-09T15:31:24,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740 2024-12-09T15:31:24,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T15:31:24,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:31:24,069 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor; using region.getMemStoreFlushHeapSize/# of families (16.0 M) instead.
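The fallback logged just above occurs because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset in the table descriptor; it can be pinned explicitly via the descriptor's key/value map. A sketch, with the table name and the 16 MB value purely illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushBoundSketch {
      public static void main(String[] args) {
        // Uses the exact property name printed in the log; the value (16 MB)
        // and table name "demo" are assumptions for illustration.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(
            td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }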
2024-12-09T15:31:24,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:31:24,076 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648 2024-12-09T15:31:24,076 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36359 2024-12-09T15:31:24,077 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T15:31:24,078 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:31:24,080 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicy super{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871935, jitterRate=0.10872389376163483}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:31:24,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T15:31:24,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733758284043; Initializing all the Stores at 1733758284045 (+2 ms); Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758284045; Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758284045; Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758284045; Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758284045; Cleaning up temporary data from old regions at 1733758284069 (+24 ms); Region opened successfully at 1733758284082 (+13 ms) 2024-12-09T15:31:24,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T15:31:24,083 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T15:31:24,083
DEBUG [RS:0;3651d8ead0f3:46789 {}] zookeeper.ZKUtil(111): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:24,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T15:31:24,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T15:31:24,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T15:31:24,083 WARN [RS:0;3651d8ead0f3:46789 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T15:31:24,083 INFO [RS:0;3651d8ead0f3:46789 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:31:24,083 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:24,087 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T15:31:24,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758284082; Disabling compacts and flushes for region at 1733758284082; Disabling writes for close at 1733758284083 (+1 ms); Writing region close event to WAL at 1733758284087 (+4 ms); Closed at 1733758284087 2024-12-09T15:31:24,089 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3651d8ead0f3,46789,1733758282141] 2024-12-09T15:31:24,092 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:31:24,092 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T15:31:24,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T15:31:24,112 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T15:31:24,115 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T15:31:24,120 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T15:31:24,140 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T15:31:24,148 INFO [RS:0;3651d8ead0f3:46789 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
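The memstore limits (880 M / 836 M) and compaction throughput bounds (100/50 MB/second) above derive from heap size and configuration. A sketch of the usual keys; the property names are assumptions to verify for the version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of heap usable by all memstores, and the lower-mark fraction
        // of that limit (both values illustrative defaults, keys assumed).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds matching the 100 MB/s and 50 MB/s logged:
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
      }
    }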
2024-12-09T15:31:24,148 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,154 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T15:31:24,163 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T15:31:24,165 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,166 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,166 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,166 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,166 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,167 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,167 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:31:24,167 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,167 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,167 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,168 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,168 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,168 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:31:24,168 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:31:24,168 DEBUG [RS:0;3651d8ead0f3:46789 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:31:24,170 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore 
name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,174 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,175 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,175 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,175 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,175 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,46789,1733758282141-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T15:31:24,197 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T15:31:24,199 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,46789,1733758282141-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,200 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:24,200 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.Replication(171): 3651d8ead0f3,46789,1733758282141 started 2024-12-09T15:31:24,233 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
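The chores listed above are instances of HBase's ScheduledChore, run by a ChoreService. A minimal sketch of that pattern; note these are server-internal classes, so their accessibility and signatures in a given release are an assumption:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Fires every 1000 ms, like the CompactionChecker chore above.
        ScheduledChore chore = new ScheduledChore("demoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        new ChoreService("demo").scheduleChore(chore);
      }
    }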
2024-12-09T15:31:24,234 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1482): Serving as 3651d8ead0f3,46789,1733758282141, RpcServer on 3651d8ead0f3/172.17.0.3:46789, sessionid=0x1012bcf72b10001 2024-12-09T15:31:24,235 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T15:31:24,235 DEBUG [RS:0;3651d8ead0f3:46789 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:24,235 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,46789,1733758282141' 2024-12-09T15:31:24,236 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T15:31:24,237 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T15:31:24,238 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T15:31:24,238 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T15:31:24,239 DEBUG [RS:0;3651d8ead0f3:46789 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:24,239 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,46789,1733758282141' 2024-12-09T15:31:24,239 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T15:31:24,240 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T15:31:24,241 DEBUG [RS:0;3651d8ead0f3:46789 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T15:31:24,241 INFO [RS:0;3651d8ead0f3:46789 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T15:31:24,241 INFO [RS:0;3651d8ead0f3:46789 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T15:31:24,266 WARN [3651d8ead0f3:39115 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
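The warning just above is expected at this point in startup: the assignment manager has no registered regionserver yet, so meta cannot be placed until the report-for-duty handshake completes. A client-side sketch of checking how many live servers have registered, using the public ClusterMetrics API:

    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class LiveServersSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Fetch only the live-server portion of the cluster status.
          ClusterMetrics metrics =
              admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
          System.out.println("live region servers: "
              + metrics.getLiveServerMetrics().size());
        }
      }
    }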
2024-12-09T15:31:24,352 INFO [RS:0;3651d8ead0f3:46789 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C46789%2C1733758282141, suffix=, logDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141, archiveDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs, maxLogs=32 2024-12-09T15:31:24,357 INFO [RS:0;3651d8ead0f3:46789 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.1733758284356 2024-12-09T15:31:24,380 INFO [RS:0;3651d8ead0f3:46789 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758284356 2024-12-09T15:31:24,383 DEBUG [RS:0;3651d8ead0f3:46789 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:40081:40081)] 2024-12-09T15:31:24,520 DEBUG [3651d8ead0f3:39115 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T15:31:24,533 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:24,540 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,46789,1733758282141, state=OPENING 2024-12-09T15:31:24,547 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T15:31:24,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:31:24,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:31:24,552 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:31:24,552 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:31:24,553 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T15:31:24,556 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,46789,1733758282141}] 2024-12-09T15:31:24,737 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T15:31:24,741 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60963, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T15:31:24,754 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T15:31:24,755 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:31:24,760 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C46789%2C1733758282141.meta, suffix=.meta, logDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141, archiveDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs, maxLogs=32 2024-12-09T15:31:24,762 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.meta.1733758284761.meta 2024-12-09T15:31:24,773 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.meta.1733758284761.meta 2024-12-09T15:31:24,778 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:40081:40081)] 2024-12-09T15:31:24,782 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:31:24,785 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T15:31:24,789 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T15:31:24,796 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
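The two WAL configuration lines (blocksize=256 MB, rollsize=128 MB, maxLogs=32) are driven by a few configuration values; rollsize is the blocksize times a roll multiplier (256 MB x 0.5 = 128 MB). A sketch that reproduces the logged values, assuming the usual key names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Logged values: blocksize=256 MB, rollsize=128 MB, maxLogs=32.
        // rollsize is derived as blocksize * multiplier (256 MB * 0.5 = 128 MB).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);
    }
}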
2024-12-09T15:31:24,803 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T15:31:24,804 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:31:24,805 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T15:31:24,805 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T15:31:24,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:31:24,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:31:24,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:24,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:31:24,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:31:24,815 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:31:24,816 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:24,817 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:31:24,817 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:31:24,820 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:31:24,820 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:24,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:31:24,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:31:24,823 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:31:24,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:24,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
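The repeated CompactionConfiguration lines print the same knobs once per column family. A sketch that writes out the values visible in the log explicitly, assuming the standard hbase.hstore/hbase.hregion key names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values as printed above, set explicitly for illustration:
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio 1.200000
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L); // major period, 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter 0.5
    }
}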
2024-12-09T15:31:24,825 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:31:24,827 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740 2024-12-09T15:31:24,831 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740 2024-12-09T15:31:24,834 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T15:31:24,834 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:31:24,835 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T15:31:24,838 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:31:24,841 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=738040, jitterRate=-0.061534568667411804}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:31:24,841 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T15:31:24,843 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733758284805Writing region info on filesystem at 1733758284806 (+1 ms)Initializing all the Stores at 1733758284809 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758284809Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758284809Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758284809Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758284809Cleaning up temporary data from old regions at 1733758284834 (+25 ms)Running coprocessor post-open hooks at 1733758284841 (+7 ms)Region opened successfully at 1733758284843 (+2 ms) 2024-12-09T15:31:24,852 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733758284726 2024-12-09T15:31:24,880 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T15:31:24,881 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T15:31:24,888 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:24,892 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,46789,1733758282141, state=OPEN 2024-12-09T15:31:24,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:31:24,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:31:24,915 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:24,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:31:24,919 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:31:24,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T15:31:24,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,46789,1733758282141 in 359 msec 2024-12-09T15:31:24,939 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T15:31:24,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 828 msec 2024-12-09T15:31:24,941 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:31:24,942 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T15:31:24,967 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T15:31:24,969 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,46789,1733758282141, seqNum=-1] 2024-12-09T15:31:24,993 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T15:31:24,999 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36767, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T15:31:25,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1590 sec 2024-12-09T15:31:25,033 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733758285033, completionTime=-1 2024-12-09T15:31:25,037 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T15:31:25,038 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T15:31:25,077 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T15:31:25,077 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733758345077 2024-12-09T15:31:25,078 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733758405078 2024-12-09T15:31:25,078 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 40 msec 2024-12-09T15:31:25,081 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,39115,1733758281102-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:25,081 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,39115,1733758281102-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:25,082 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,39115,1733758281102-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:25,083 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3651d8ead0f3:39115, period=300000, unit=MILLISECONDS is enabled. 
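InitMetaProcedure creates the reserved 'default' and 'hbase' namespaces itself; additional namespaces are created the same way through the Admin API. A minimal client-side sketch ('testns' is a made-up name, and the connection settings are assumed to come from an hbase-site.xml on the classpath):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // 'default' and 'hbase' are reserved and created by the master itself;
            // "testns" is a hypothetical name used only for illustration.
            admin.createNamespace(NamespaceDescriptor.create("testns").build());
        }
    }
}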
2024-12-09T15:31:25,084 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:25,084 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T15:31:25,092 DEBUG [master/3651d8ead0f3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T15:31:25,117 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.760sec 2024-12-09T15:31:25,118 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T15:31:25,120 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T15:31:25,121 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T15:31:25,122 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T15:31:25,122 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T15:31:25,123 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,39115,1733758281102-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T15:31:25,124 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,39115,1733758281102-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T15:31:25,142 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T15:31:25,143 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T15:31:25,144 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,39115,1733758281102-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
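The BalancerChore registered above only does work while the balancer switch is on, and the test flips that switch off shortly after startup (the 'set balanceSwitch=false' line below). From a client that is a single Admin call; a sketch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalancerSwitchSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Matches "Client=null/null set balanceSwitch=false" further down;
            // the return value is the previous state of the switch.
            boolean previous = admin.balancerSwitch(false, false);
            System.out.println("balancer was " + (previous ? "on" : "off"));
        }
    }
}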
2024-12-09T15:31:25,195 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61c64a9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T15:31:25,198 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T15:31:25,198 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T15:31:25,203 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3651d8ead0f3,39115,-1 for getting cluster id 2024-12-09T15:31:25,206 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T15:31:25,216 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fd131ba1-6913-45c9-a284-d504c9a579da' 2024-12-09T15:31:25,219 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T15:31:25,219 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fd131ba1-6913-45c9-a284-d504c9a579da" 2024-12-09T15:31:25,222 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e60f740, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T15:31:25,222 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3651d8ead0f3,39115,-1] 2024-12-09T15:31:25,226 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T15:31:25,232 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T15:31:25,234 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43756, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T15:31:25,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@231106e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T15:31:25,239 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T15:31:25,249 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,46789,1733758282141, seqNum=-1] 2024-12-09T15:31:25,250 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T15:31:25,253 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54428, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T15:31:25,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=3651d8ead0f3,39115,1733758281102 2024-12-09T15:31:25,283 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:31:25,295 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T15:31:25,301 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T15:31:25,307 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3651d8ead0f3,39115,1733758281102 2024-12-09T15:31:25,312 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@43cb8576 2024-12-09T15:31:25,313 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T15:31:25,318 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43770, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T15:31:25,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39115 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T15:31:25,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39115 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
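Both TableDescriptorChecker warnings are expected here: the test deliberately shrinks MAX_FILESIZE and MEMSTORE_FLUSHSIZE to force frequent splits and flushes. A sketch of a descriptor carrying the two logged values (class name made up):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyTableSketch {
    public static void main(String[] args) {
        // The two values the checker complains about, taken from the log.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(786432)       // MAX_FILESIZE: splits very aggressively
            .setMemStoreFlushSize(8192)   // MEMSTORE_FLUSHSIZE: flushes very often
            .build();
        System.out.println(td);
    }
}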
2024-12-09T15:31:25,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39115 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T15:31:25,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39115 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-09T15:31:25,341 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T15:31:25,344 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39115 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-09T15:31:25,344 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:25,351 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T15:31:25,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39115 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T15:31:25,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741835_1011 (size=389) 2024-12-09T15:31:25,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741835_1011 (size=389) 2024-12-09T15:31:25,466 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6de1a9a9e60087ffa9d1a0a7c799aa30, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648 2024-12-09T15:31:25,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741836_1012 (size=72) 2024-12-09T15:31:25,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741836_1012 (size=72) 2024-12-09T15:31:25,568 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:31:25,568 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 6de1a9a9e60087ffa9d1a0a7c799aa30, disabling compactions & flushes 2024-12-09T15:31:25,568 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. 2024-12-09T15:31:25,568 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. 2024-12-09T15:31:25,568 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. after waiting 0 ms 2024-12-09T15:31:25,569 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. 2024-12-09T15:31:25,569 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. 2024-12-09T15:31:25,569 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6de1a9a9e60087ffa9d1a0a7c799aa30: Waiting for close lock at 1733758285568Disabling compacts and flushes for region at 1733758285568Disabling writes for close at 1733758285568Writing region close event to WAL at 1733758285569 (+1 ms)Closed at 1733758285569 2024-12-09T15:31:25,573 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T15:31:25,581 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733758285574"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733758285574"}]},"ts":"1733758285574"} 2024-12-09T15:31:25,589 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T15:31:25,591 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T15:31:25,594 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758285591"}]},"ts":"1733758285591"} 2024-12-09T15:31:25,601 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-09T15:31:25,603 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6de1a9a9e60087ffa9d1a0a7c799aa30, ASSIGN}] 2024-12-09T15:31:25,607 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6de1a9a9e60087ffa9d1a0a7c799aa30, ASSIGN 2024-12-09T15:31:25,611 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6de1a9a9e60087ffa9d1a0a7c799aa30, ASSIGN; state=OFFLINE, location=3651d8ead0f3,46789,1733758282141; forceNewPlan=false, retain=false 2024-12-09T15:31:25,763 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6de1a9a9e60087ffa9d1a0a7c799aa30, regionState=OPENING, regionLocation=3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:25,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6de1a9a9e60087ffa9d1a0a7c799aa30, ASSIGN because future has completed 2024-12-09T15:31:25,779 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6de1a9a9e60087ffa9d1a0a7c799aa30, server=3651d8ead0f3,46789,1733758282141}] 2024-12-09T15:31:25,952 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. 
2024-12-09T15:31:25,953 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6de1a9a9e60087ffa9d1a0a7c799aa30, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:31:25,953 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,953 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:31:25,954 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,954 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,958 INFO [StoreOpener-6de1a9a9e60087ffa9d1a0a7c799aa30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,961 INFO [StoreOpener-6de1a9a9e60087ffa9d1a0a7c799aa30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6de1a9a9e60087ffa9d1a0a7c799aa30 columnFamilyName info 2024-12-09T15:31:25,962 DEBUG [StoreOpener-6de1a9a9e60087ffa9d1a0a7c799aa30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:31:25,963 INFO [StoreOpener-6de1a9a9e60087ffa9d1a0a7c799aa30-1 {}] regionserver.HStore(327): Store=6de1a9a9e60087ffa9d1a0a7c799aa30/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:31:25,964 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,965 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,966 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,967 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,967 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,971 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,976 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:31:25,977 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6de1a9a9e60087ffa9d1a0a7c799aa30; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=726031, jitterRate=-0.07680472731590271}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T15:31:25,978 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:25,979 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6de1a9a9e60087ffa9d1a0a7c799aa30: Running coprocessor pre-open hook at 1733758285954Writing region info on filesystem at 1733758285954Initializing all the Stores at 1733758285957 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758285957Cleaning up temporary data from old regions at 1733758285968 (+11 ms)Running coprocessor post-open hooks at 1733758285978 (+10 ms)Region opened successfully at 1733758285979 (+1 ms) 2024-12-09T15:31:25,981 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30., pid=6, masterSystemTime=1733758285935 2024-12-09T15:31:25,989 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. 2024-12-09T15:31:25,989 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. 2024-12-09T15:31:25,991 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6de1a9a9e60087ffa9d1a0a7c799aa30, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,46789,1733758282141 2024-12-09T15:31:26,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6de1a9a9e60087ffa9d1a0a7c799aa30, server=3651d8ead0f3,46789,1733758282141 because future has completed 2024-12-09T15:31:26,026 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T15:31:26,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6de1a9a9e60087ffa9d1a0a7c799aa30, server=3651d8ead0f3,46789,1733758282141 in 234 msec 2024-12-09T15:31:26,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T15:31:26,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6de1a9a9e60087ffa9d1a0a7c799aa30, ASSIGN in 423 msec 2024-12-09T15:31:26,036 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T15:31:26,036 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758286036"}]},"ts":"1733758286036"} 2024-12-09T15:31:26,045 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-09T15:31:26,048 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T15:31:26,053 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 720 msec 2024-12-09T15:31:30,317 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T15:31:30,377 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T15:31:30,379 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-09T15:31:31,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T15:31:31,690 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T15:31:31,692 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T15:31:31,692 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T15:31:31,693 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T15:31:31,694 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T15:31:31,694 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T15:31:31,694 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T15:31:35,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39115 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T15:31:35,404 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-09T15:31:35,406 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-09T15:31:35,414 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-09T15:31:35,415 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. 
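Once the CREATE procedure completes, the client resolves the table's single region by scanning hbase:meta, as the ClientMetaTableAccessor line shows. A sketch of the equivalent lookup through RegionLocator (connection settings again assumed from hbase-site.xml):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLookupSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
            // Equivalent in spirit to the META scan in the log: exactly one
            // location is expected back for this single-region table.
            locator.getAllRegionLocations().forEach(System.out::println);
        }
    }
}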
2024-12-09T15:31:35,416 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.1733758295415 2024-12-09T15:31:35,425 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:35,425 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:35,425 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:35,425 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:35,425 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:35,426 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758284356 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758295415 2024-12-09T15:31:35,427 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:40081:40081)] 2024-12-09T15:31:35,428 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758284356 is not closed yet, will try archiving it next time 2024-12-09T15:31:35,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741833_1009 (size=451) 2024-12-09T15:31:35,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741833_1009 (size=451) 2024-12-09T15:31:35,437 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30., hostname=3651d8ead0f3,46789,1733758282141, seqNum=2] 2024-12-09T15:31:35,832 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758284356 to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs/3651d8ead0f3%2C46789%2C1733758282141.1733758284356 2024-12-09T15:31:47,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46789 {}] regionserver.HRegion(8855): Flush requested on 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:31:47,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6de1a9a9e60087ffa9d1a0a7c799aa30 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T15:31:47,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/91f7d6fec44d4f6d936c2ba62b33d671 is 1080, key is row0001/info:/1733758295440/Put/seqid=0 2024-12-09T15:31:47,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741838_1014 (size=12509) 2024-12-09T15:31:47,575 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741838_1014 (size=12509) 2024-12-09T15:31:47,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/91f7d6fec44d4f6d936c2ba62b33d671 2024-12-09T15:31:47,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/91f7d6fec44d4f6d936c2ba62b33d671 as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/91f7d6fec44d4f6d936c2ba62b33d671 2024-12-09T15:31:47,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/91f7d6fec44d4f6d936c2ba62b33d671, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T15:31:47,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6de1a9a9e60087ffa9d1a0a7c799aa30 in 175ms, sequenceid=11, compaction requested=false 2024-12-09T15:31:47,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6de1a9a9e60087ffa9d1a0a7c799aa30: 2024-12-09T15:31:50,204 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details.
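The flush just logged is a two-phase pattern: DefaultStoreFlusher writes the memstore snapshot to a file under .tmp, and HRegionFileSystem then "commits" it into the store directory with a rename, so a reader can never observe a half-written HFile. A minimal stand-in for that pattern, using plain java.nio.file rather than HDFS (the paths and the toy record format are assumptions of this sketch):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.List;

public class FlushCommitSketch {
    static Path flush(List<String> memstoreSnapshot, Path storeDir) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        // Phase 1: write the whole snapshot to a temporary file under .tmp.
        Path tmp = Files.createTempFile(tmpDir, "flush-", ".data");
        Files.write(tmp, memstoreSnapshot);
        // Phase 2: commit with an atomic rename into the store directory,
        // mirroring the HRegionFileSystem(442) "Committing ... as ..." record.
        Path committed = storeDir.resolve(tmp.getFileName());
        return Files.move(tmp, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path store = Files.createTempDirectory("info-store");
        Path hfile = flush(List.of("row0001=v", "row0002=v"), store);
        System.out.println("committed " + hfile);
    }
}

The rename is cheap on a real filesystem and on HDFS alike, which is why the "Committing" step takes milliseconds even when the flushed file is large.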
2024-12-09T15:31:55,491 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.1733758315491 2024-12-09T15:31:55,717 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 221 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:31:55,717 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:55,718 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:55,718 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:55,718 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:55,718 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:31:55,719 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758295415 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758315491 2024-12-09T15:31:55,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741837_1013 (size=12399) 2024-12-09T15:31:55,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741837_1013 (size=12399) 2024-12-09T15:31:55,732 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:40081:40081)] 2024-12-09T15:31:55,939 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:31:58,144 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:00,353 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:02,561 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:02,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46789 {}] 
regionserver.HRegion(8855): Flush requested on 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:32:02,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6de1a9a9e60087ffa9d1a0a7c799aa30 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T15:32:02,766 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:02,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/61b9899a68484b9e81fcabbdaac2f92d is 1080, key is row0008/info:/1733758309476/Put/seqid=0 2024-12-09T15:32:02,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741840_1016 (size=12509) 2024-12-09T15:32:02,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741840_1016 (size=12509) 2024-12-09T15:32:02,824 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/61b9899a68484b9e81fcabbdaac2f92d 2024-12-09T15:32:02,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/61b9899a68484b9e81fcabbdaac2f92d as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/61b9899a68484b9e81fcabbdaac2f92d 2024-12-09T15:32:02,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/61b9899a68484b9e81fcabbdaac2f92d, entries=7, sequenceid=21, filesize=12.2 K 2024-12-09T15:32:03,075 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:03,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6de1a9a9e60087ffa9d1a0a7c799aa30 in 514ms, sequenceid=21, compaction requested=false 2024-12-09T15:32:03,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6de1a9a9e60087ffa9d1a0a7c799aa30: 2024-12-09T15:32:03,076 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-09T15:32:03,076 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T15:32:03,077 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/91f7d6fec44d4f6d936c2ba62b33d671 because midkey is the same as first or last row 2024-12-09T15:32:04,765 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:05,402 INFO [master/3651d8ead0f3:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T15:32:05,402 INFO [master/3651d8ead0f3:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T15:32:06,970 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:06,973 WARN [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:06,975 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C46789%2C1733758282141:(num 1733758315491) roll requested 2024-12-09T15:32:06,975 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.1733758326975 2024-12-09T15:32:07,184 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:07,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:07,185 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:07,185 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:07,185 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:07,185 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:07,186 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758315491 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758326975 2024-12-09T15:32:07,187 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:40081:40081)] 2024-12-09T15:32:07,187 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758315491 is not closed yet, will try archiving it next time 2024-12-09T15:32:07,187 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758295415 to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs/3651d8ead0f3%2C46789%2C1733758282141.1733758295415 2024-12-09T15:32:07,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741839_1015 (size=7739) 2024-12-09T15:32:07,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741839_1015 (size=7739) 2024-12-09T15:32:09,175 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:10,954 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6de1a9a9e60087ffa9d1a0a7c799aa30, had cached 0 bytes from a total of 25018 2024-12-09T15:32:11,380 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:13,586 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:15,795 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:17,797 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T15:32:17,798 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.1733758337798 
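The AbstractFSWAL warnings around this point show the two roll triggers at work: the WARN at AbstractFSWAL(2201) above rolls the log after too many slow syncs in a row ("count=8, threshold=5"), and a few records below any single sync beyond a hard cap forces a roll as well ("time=5007 ms, threshold=5000 ms"). A simplified, self-contained version of that decision logic; the two roll thresholds are taken from the log, while the 100 ms "slow" mark and the window handling are assumptions of this sketch:

public class SlowSyncSketch {
    static final long SLOW_SYNC_MS = 100;        // assumed mark for a "slow" sync
    static final int  SLOW_SYNC_ROLL_COUNT = 5;  // "threshold=5" from the log
    static final long ROLL_SYNC_MS = 5_000;      // "threshold=5000 ms" from the log

    private int slowSyncCount;

    // Returns true when the WAL should request a roll, mirroring the two
    // warnings: a burst of slow syncs, or one very slow sync.
    boolean recordSync(long costMs) {
        if (costMs >= ROLL_SYNC_MS) {
            slowSyncCount = 0;
            return true;                         // "time=5007 ms, threshold=5000 ms"
        }
        if (costMs >= SLOW_SYNC_MS && ++slowSyncCount > SLOW_SYNC_ROLL_COUNT) {
            slowSyncCount = 0;
            return true;                         // "count=8, threshold=5"
        }
        return false;
    }

    public static void main(String[] args) {
        SlowSyncSketch wal = new SlowSyncSketch();
        // Costs resembling the ~200 ms syncs logged above.
        for (long cost : new long[] {221, 204, 201, 204, 201, 204, 203, 201}) {
            if (wal.recordSync(cost)) {
                System.out.println("roll requested after slow sync burst, cost=" + cost + " ms");
            }
        }
        System.out.println("roll on single 5007 ms sync: " + wal.recordSync(5_007));
    }
}

Rolling on slow syncs gives the writer a fresh pipeline of datanodes, which is exactly the recovery behavior this test is exercising.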
2024-12-09T15:32:20,205 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-09T15:32:22,809 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:22,811 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:22,811 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C46789%2C1733758282141:(num 1733758337798) roll requested 2024-12-09T15:32:22,812 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:22,812 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:22,812 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:22,813 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:22,813 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:22,813 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758326975 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758337798 2024-12-09T15:32:22,814 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:40081:40081)] 2024-12-09T15:32:22,814 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758326975 is not closed yet, will try archiving it next time 2024-12-09T15:32:22,815 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.1733758342815 2024-12-09T15:32:22,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741841_1017 (size=4753) 2024-12-09T15:32:22,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741841_1017 (size=4753) 2024-12-09T15:32:27,819 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:27,819 WARN [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1374): Requesting
log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:27,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46789 {}] regionserver.HRegion(8855): Flush requested on 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:32:27,820 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6de1a9a9e60087ffa9d1a0a7c799aa30 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T15:32:27,826 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:27,826 WARN [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:29,821 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T15:32:32,822 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:32,823 WARN [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:32,823 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:32,823 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:32,823 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:32,823 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:32,824 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:32,824 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758337798 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758342815 2024-12-09T15:32:32,825 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:40081:40081)] 2024-12-09T15:32:32,825 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758337798 is not closed yet, will try archiving it next time 2024-12-09T15:32:32,825 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C46789%2C1733758282141:(num 1733758342815) roll requested 2024-12-09T15:32:32,826 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.1733758352825 2024-12-09T15:32:32,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741842_1018 (size=1569) 2024-12-09T15:32:32,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/8f3cb7fad7ae45e7ad254c0894113710 is 1080, key is row0015/info:/1733758324564/Put/seqid=0 2024-12-09T15:32:32,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741842_1018 (size=1569) 2024-12-09T15:32:32,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741844_1020 (size=12509) 2024-12-09T15:32:32,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741844_1020 (size=12509) 2024-12-09T15:32:32,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/8f3cb7fad7ae45e7ad254c0894113710 2024-12-09T15:32:32,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/8f3cb7fad7ae45e7ad254c0894113710 as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/8f3cb7fad7ae45e7ad254c0894113710 2024-12-09T15:32:32,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/8f3cb7fad7ae45e7ad254c0894113710, entries=7, sequenceid=31, filesize=12.2 K 2024-12-09T15:32:37,834 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:37,834 WARN [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], 
DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:37,873 INFO [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:37,873 WARN [FSHLog-0-hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648-prefix:3651d8ead0f3,46789,1733758282141 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41631,DS-b79c0664-dbb5-4bba-91a0-06bfd45edbbd,DISK], DatanodeInfoWithStorage[127.0.0.1:39699,DS-d1553715-9cdc-48aa-b88b-087355fb011a,DISK]] 2024-12-09T15:32:37,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6de1a9a9e60087ffa9d1a0a7c799aa30 in 10053ms, sequenceid=31, compaction requested=true 2024-12-09T15:32:37,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6de1a9a9e60087ffa9d1a0a7c799aa30: 2024-12-09T15:32:37,873 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,873 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-09T15:32:37,873 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,873 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T15:32:37,873 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/91f7d6fec44d4f6d936c2ba62b33d671 because midkey is the same as first or last row 2024-12-09T15:32:37,874 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,874 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,874 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,874 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758342815 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758352825 2024-12-09T15:32:37,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6de1a9a9e60087ffa9d1a0a7c799aa30:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T15:32:37,875 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40081:40081),(127.0.0.1/127.0.0.1:42231:42231)] 2024-12-09T15:32:37,875 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758342815 is not closed yet, will try archiving it next time 2024-12-09T15:32:37,876 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758315491 to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs/3651d8ead0f3%2C46789%2C1733758282141.1733758315491 2024-12-09T15:32:37,876 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C46789%2C1733758282141:(num 1733758357876) roll requested 2024-12-09T15:32:37,876 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.1733758357876 2024-12-09T15:32:37,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741843_1019 (size=438) 2024-12-09T15:32:37,878 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T15:32:37,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:32:37,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741843_1019 (size=438) 2024-12-09T15:32:37,880 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758326975 to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs/3651d8ead0f3%2C46789%2C1733758282141.1733758326975 2024-12-09T15:32:37,881 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T15:32:37,882 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758337798 to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs/3651d8ead0f3%2C46789%2C1733758282141.1733758337798 2024-12-09T15:32:37,882 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.HStore(1541): 6de1a9a9e60087ffa9d1a0a7c799aa30/info is initiating minor compaction (all files) 2024-12-09T15:32:37,883 INFO [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6de1a9a9e60087ffa9d1a0a7c799aa30/info in TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. 
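The compaction records just above show the exploring policy considering the 3 flushed store files ("3 eligible") and selecting all of them ("selected 3 files of size 37527"). The core "in ratio" test that such a selection rests on can be sketched as below; the 1.2 ratio is HBase's usual default and an assumption here, and this ignores min/max file counts and off-peak handling:

import java.util.List;

public class CompactionSelectSketch {
    // A candidate set is "in ratio" when no file is bigger than ratio times the
    // combined size of the others; that keeps minor compactions from endlessly
    // rewriting one already-large file.
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three 12,509-byte flush outputs from the log (total 37,527): selected.
        System.out.println(filesInRatio(List.of(12_509L, 12_509L, 12_509L), 1.2));
        // After compaction one ~27,710-byte file remains; pairing it with a single
        // fresh flush output is not in ratio, so no new compaction is requested.
        System.out.println(filesInRatio(List.of(27_710L, 12_509L), 1.2));
    }
}

That second case is why the later flush in this log ends with "compaction requested=false" even though the store again has more than one file.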
2024-12-09T15:32:37,883 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758342815 to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs/3651d8ead0f3%2C46789%2C1733758282141.1733758342815 2024-12-09T15:32:37,884 INFO [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/91f7d6fec44d4f6d936c2ba62b33d671, hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/61b9899a68484b9e81fcabbdaac2f92d, hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/8f3cb7fad7ae45e7ad254c0894113710] into tmpdir=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp, totalSize=36.6 K 2024-12-09T15:32:37,885 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 91f7d6fec44d4f6d936c2ba62b33d671, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733758295440 2024-12-09T15:32:37,885 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,885 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,885 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 61b9899a68484b9e81fcabbdaac2f92d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733758309476 2024-12-09T15:32:37,886 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,886 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,886 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,886 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758352825 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758357876 2024-12-09T15:32:37,886 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8f3cb7fad7ae45e7ad254c0894113710, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733758324564 2024-12-09T15:32:37,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741845_1021 (size=93) 2024-12-09T15:32:37,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741845_1021 (size=93) 2024-12-09T15:32:37,889 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758352825 to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs/3651d8ead0f3%2C46789%2C1733758282141.1733758352825 2024-12-09T15:32:37,895 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:40081:40081)] 2024-12-09T15:32:37,895 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C46789%2C1733758282141.1733758357895 2024-12-09T15:32:37,906 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,906 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,906 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,907 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,907 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:32:37,907 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758357876 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758357895 2024-12-09T15:32:37,908 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:40081:40081)] 2024-12-09T15:32:37,908 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/WALs/3651d8ead0f3,46789,1733758282141/3651d8ead0f3%2C46789%2C1733758282141.1733758357876 is not closed yet, will try archiving it next time 2024-12-09T15:32:37,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741846_1022 (size=1258) 2024-12-09T15:32:37,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741846_1022 (size=1258) 2024-12-09T15:32:37,924 INFO [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6de1a9a9e60087ffa9d1a0a7c799aa30#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T15:32:37,926 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/7bb259e7dc86495c94e1736034c7df1f is 1080, key is row0001/info:/1733758295440/Put/seqid=0 2024-12-09T15:32:37,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741848_1024 (size=27710) 2024-12-09T15:32:37,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741848_1024 (size=27710) 2024-12-09T15:32:37,945 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/7bb259e7dc86495c94e1736034c7df1f as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/7bb259e7dc86495c94e1736034c7df1f 2024-12-09T15:32:37,967 INFO [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6de1a9a9e60087ffa9d1a0a7c799aa30/info of 6de1a9a9e60087ffa9d1a0a7c799aa30 into 7bb259e7dc86495c94e1736034c7df1f(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T15:32:37,968 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6de1a9a9e60087ffa9d1a0a7c799aa30: 2024-12-09T15:32:37,971 INFO [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30., storeName=6de1a9a9e60087ffa9d1a0a7c799aa30/info, priority=13, startTime=1733758357875; duration=0sec 2024-12-09T15:32:37,971 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T15:32:37,971 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T15:32:37,971 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/7bb259e7dc86495c94e1736034c7df1f because midkey is the same as first or last row 2024-12-09T15:32:37,975 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T15:32:37,975 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T15:32:37,975 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/7bb259e7dc86495c94e1736034c7df1f because midkey is the same as first or last row 2024-12-09T15:32:37,975 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T15:32:37,975 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T15:32:37,975 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/7bb259e7dc86495c94e1736034c7df1f because midkey is the same as first or last row 2024-12-09T15:32:37,975 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:32:37,976 DEBUG [RS:0;3651d8ead0f3:46789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6de1a9a9e60087ffa9d1a0a7c799aa30:info 2024-12-09T15:32:49,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46789 {}] regionserver.HRegion(8855): Flush requested on 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:32:49,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6de1a9a9e60087ffa9d1a0a7c799aa30 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T15:32:49,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/0b531abfdde746388f191fe57e9c995c is 1080, key is row0022/info:/1733758357897/Put/seqid=0 2024-12-09T15:32:49,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741849_1025 (size=12509) 2024-12-09T15:32:49,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741849_1025 (size=12509) 2024-12-09T15:32:49,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/0b531abfdde746388f191fe57e9c995c 2024-12-09T15:32:49,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/0b531abfdde746388f191fe57e9c995c as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/0b531abfdde746388f191fe57e9c995c 2024-12-09T15:32:49,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/0b531abfdde746388f191fe57e9c995c, entries=7, sequenceid=42, filesize=12.2 K 2024-12-09T15:32:49,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6de1a9a9e60087ffa9d1a0a7c799aa30 in 49ms, sequenceid=42, compaction requested=false 2024-12-09T15:32:49,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6de1a9a9e60087ffa9d1a0a7c799aa30: 2024-12-09T15:32:49,975 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-09T15:32:49,975 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T15:32:49,975 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/7bb259e7dc86495c94e1736034c7df1f because midkey is the same as first or last row 2024-12-09T15:32:50,205 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-09T15:32:55,954 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6de1a9a9e60087ffa9d1a0a7c799aa30, had cached 0 bytes from a total of 40219 2024-12-09T15:32:57,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T15:32:57,945 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
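Before the shutdown trace that follows, note the pair of split checks that closed out every flush in this run: ConstantSizeRegionSplitPolicy finds the region big enough ("sumSize=39.3 K, sizeToCheck=16.0 K"), but StoreUtils vetoes the split because the biggest file's midkey equals its first or last row, so there is no usable split point. Roughly, with an invented StoreFile record standing in for HBase's store-file metadata:

import java.util.Comparator;
import java.util.List;

public class SplitCheckSketch {
    // Invented stand-in for a store file's size and key range.
    record StoreFile(long size, String firstRow, String midRow, String lastRow) {}

    static boolean shouldSplit(List<StoreFile> files, long sizeToCheck) {
        long sumSize = files.stream().mapToLong(StoreFile::size).sum();
        if (sumSize <= sizeToCheck) {
            return false;                       // region not big enough yet
        }
        StoreFile biggest = files.stream()
            .max(Comparator.comparingLong(StoreFile::size)).orElseThrow();
        // "cannot split ... because midkey is the same as first or last row":
        // without a distinct middle key there is nowhere to cut the region.
        return !biggest.midRow().equals(biggest.firstRow())
            && !biggest.midRow().equals(biggest.lastRow());
    }

    public static void main(String[] args) {
        // ~40,219 bytes total, as in the "had cached 0 bytes from a total of
        // 40219" record, but with a degenerate midkey: prints false.
        List<StoreFile> store = List.of(
            new StoreFile(40_219, "row0001", "row0001", "row0035"));
        System.out.println(shouldSplit(store, 16 * 1024));
    }
}

Both conditions must pass before a split is queued, which is why this test region keeps growing without ever splitting.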
2024-12-09T15:32:57,946 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T15:32:57,952 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T15:32:57,954 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
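The call stack above walks from the JUnit runner through AbstractTestLogRolling.tearDown into HBaseTestingUtil.shutdownMiniCluster, the standard lifecycle for these WAL tests. A skeleton of that harness is sketched below; the class and method names follow the stack trace and common usage, but the exact signatures are assumptions and should be checked against the HBase version in use:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class LogRollingHarnessSketch {
    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        // Spins up in-process ZooKeeper, HDFS, a master, and a region server,
        // producing the startup records seen earlier in this log.
        util.startMiniCluster();
    }

    @Test
    public void clusterLifecycle() {
        // A real test would create a table and write rows here, generating
        // exactly the flush/roll records above.
        Assert.assertNotNull(util.getConfiguration());
    }

    @After
    public void tearDown() throws Exception {
        // The frame in the stack trace: HBaseTestingUtil.shutdownMiniCluster.
        util.shutdownMiniCluster();
    }
}

Because tearDown runs under the test timeout (the "Time-limited test" thread), a hung shutdown surfaces as a FailOnTimeout stack like the ones captured here.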
2024-12-09T15:32:57,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T15:32:57,954 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T15:32:57,954 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1014812272, stopped=false 2024-12-09T15:32:57,956 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3651d8ead0f3,39115,1733758281102 2024-12-09T15:32:57,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T15:32:57,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T15:32:57,963 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T15:32:57,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:32:57,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:32:57,964 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T15:32:57,965 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T15:32:57,965 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T15:32:57,965 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3651d8ead0f3,46789,1733758282141' ***** 2024-12-09T15:32:57,966 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T15:32:57,966 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T15:32:57,966 INFO [RS:0;3651d8ead0f3:46789 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T15:32:57,967 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T15:32:57,967 INFO [RS:0;3651d8ead0f3:46789 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T15:32:57,967 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(3091): Received CLOSE for 6de1a9a9e60087ffa9d1a0a7c799aa30 2024-12-09T15:32:57,968 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:32:57,970 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:32:57,973 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(959): stopping server 3651d8ead0f3,46789,1733758282141 2024-12-09T15:32:57,973 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T15:32:57,974 INFO [RS:0;3651d8ead0f3:46789 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3651d8ead0f3:46789. 
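The ZKWatcher events above are the cluster-wide shutdown broadcast itself: the master deletes /hbase/running, every watcher receives NodeDeleted, and each process begins its own stop sequence. Stripped of ZooKeeper, the coordination shape is a one-shot signal, as in this toy model where a CountDownLatch stands in for the znode:

import java.util.concurrent.CountDownLatch;

public class ShutdownSignalSketch {
    public static void main(String[] args) throws InterruptedException {
        // Stand-in for the /hbase/running znode: present until "deleted".
        CountDownLatch runningZnode = new CountDownLatch(1);

        Thread regionServer = new Thread(() -> {
            try {
                runningZnode.await();          // the NodeDeleted event arriving
                System.out.println("***** STOPPING region server *****");
                System.out.println("closing regions and flushing memstores...");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }, "RS:0");
        regionServer.start();

        System.out.println("master: cluster shutdown requested");
        runningZnode.countDown();              // master deletes /hbase/running
        regionServer.join();
    }
}

Using a deleted znode rather than direct RPC means every server, including ones the master cannot reach, observes the same signal through its existing ZooKeeper session.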
2024-12-09T15:32:57,974 DEBUG [RS:0;3651d8ead0f3:46789 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:32:57,974 DEBUG [RS:0;3651d8ead0f3:46789 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:32:57,974 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T15:32:57,974 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T15:32:57,974 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T15:32:57,974 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T15:32:57,976 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6de1a9a9e60087ffa9d1a0a7c799aa30, disabling compactions & flushes
2024-12-09T15:32:57,976 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.
2024-12-09T15:32:57,976 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.
2024-12-09T15:32:57,976 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. after waiting 0 ms
2024-12-09T15:32:57,976 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.
2024-12-09T15:32:57,976 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6de1a9a9e60087ffa9d1a0a7c799aa30 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB
2024-12-09T15:32:57,992 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-12-09T15:32:57,992 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1325): Online Regions={6de1a9a9e60087ffa9d1a0a7c799aa30=TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30., 1588230740=hbase:meta,,1.1588230740}
2024-12-09T15:32:57,992 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6de1a9a9e60087ffa9d1a0a7c799aa30
2024-12-09T15:32:57,994 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T15:32:57,994 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T15:32:57,994 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T15:32:57,994 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T15:32:57,994 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T15:32:57,994 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB
2024-12-09T15:32:58,012 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/cdc0055962a44755a990380f145316b5 is 1080, key is row0029/info:/1733758371929/Put/seqid=0
2024-12-09T15:32:58,089 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/.tmp/info/5226a611ba7a48159ff87266aa018033 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30./info:regioninfo/1733758285991/Put/seqid=0
2024-12-09T15:32:58,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741850_1026 (size=8193)
2024-12-09T15:32:58,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741850_1026 (size=8193)
2024-12-09T15:32:58,119 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/cdc0055962a44755a990380f145316b5
2024-12-09T15:32:58,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741851_1027 (size=7016)
2024-12-09T15:32:58,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741851_1027 (size=7016)
2024-12-09T15:32:58,182 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T15:32:58,183 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T15:32:58,183 INFO [regionserver/3651d8ead0f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:32:58,191 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/.tmp/info/cdc0055962a44755a990380f145316b5 as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/cdc0055962a44755a990380f145316b5
2024-12-09T15:32:58,193 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6de1a9a9e60087ffa9d1a0a7c799aa30
2024-12-09T15:32:58,228 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/cdc0055962a44755a990380f145316b5, entries=3, sequenceid=48, filesize=8.0 K
2024-12-09T15:32:58,231 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6de1a9a9e60087ffa9d1a0a7c799aa30 in 255ms, sequenceid=48, compaction requested=true
2024-12-09T15:32:58,233 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/91f7d6fec44d4f6d936c2ba62b33d671, hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/61b9899a68484b9e81fcabbdaac2f92d, hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/8f3cb7fad7ae45e7ad254c0894113710] to archive
2024-12-09T15:32:58,250 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-09T15:32:58,276 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/91f7d6fec44d4f6d936c2ba62b33d671 to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/91f7d6fec44d4f6d936c2ba62b33d671
2024-12-09T15:32:58,295 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/61b9899a68484b9e81fcabbdaac2f92d to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/61b9899a68484b9e81fcabbdaac2f92d
2024-12-09T15:32:58,299 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/8f3cb7fad7ae45e7ad254c0894113710 to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/info/8f3cb7fad7ae45e7ad254c0894113710
2024-12-09T15:32:58,318 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3651d8ead0f3:39115 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-12-09T15:32:58,324 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [91f7d6fec44d4f6d936c2ba62b33d671=12509, 61b9899a68484b9e81fcabbdaac2f92d=12509, 8f3cb7fad7ae45e7ad254c0894113710=12509]
2024-12-09T15:32:58,333 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/default/TestLogRolling-testSlowSyncLogRolling/6de1a9a9e60087ffa9d1a0a7c799aa30/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1
2024-12-09T15:32:58,336 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.
2024-12-09T15:32:58,337 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6de1a9a9e60087ffa9d1a0a7c799aa30:
    Waiting for close lock at 1733758377975
    Running coprocessor pre-close hooks at 1733758377976 (+1 ms)
    Disabling compacts and flushes for region at 1733758377976
    Disabling writes for close at 1733758377976
    Obtaining lock to block concurrent updates at 1733758377976
    Preparing flush snapshotting stores in 6de1a9a9e60087ffa9d1a0a7c799aa30 at 1733758377976
    Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733758377981 (+5 ms)
    Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30. at 1733758377987 (+6 ms)
    Flushing 6de1a9a9e60087ffa9d1a0a7c799aa30/info: creating writer at 1733758377987
    Flushing 6de1a9a9e60087ffa9d1a0a7c799aa30/info: appending metadata at 1733758378011 (+24 ms)
    Flushing 6de1a9a9e60087ffa9d1a0a7c799aa30/info: closing flushed file at 1733758378011
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7636a5b8: reopening flushed file at 1733758378186 (+175 ms)
    Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6de1a9a9e60087ffa9d1a0a7c799aa30 in 255ms, sequenceid=48, compaction requested=true at 1733758378231 (+45 ms)
    Writing region close event to WAL at 1733758378325 (+94 ms)
    Running coprocessor post-close hooks at 1733758378334 (+9 ms)
    Closed at 1733758378336 (+2 ms)
2024-12-09T15:32:58,337 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733758285320.6de1a9a9e60087ffa9d1a0a7c799aa30.
2024-12-09T15:32:58,393 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:32:58,556 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/.tmp/info/5226a611ba7a48159ff87266aa018033
2024-12-09T15:32:58,583 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/.tmp/ns/dc85f418df0b4884bae6072a8485f2fa is 43, key is default/ns:d/1733758285006/Put/seqid=0
2024-12-09T15:32:58,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741852_1028 (size=5153)
2024-12-09T15:32:58,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741852_1028 (size=5153)
2024-12-09T15:32:58,593 DEBUG [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:32:58,593 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/.tmp/ns/dc85f418df0b4884bae6072a8485f2fa
2024-12-09T15:32:58,624 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/.tmp/table/1f482c957598416085987e0b8f397594 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733758286036/Put/seqid=0
2024-12-09T15:32:58,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741853_1029 (size=5396)
2024-12-09T15:32:58,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741853_1029 (size=5396)
2024-12-09T15:32:58,632 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/.tmp/table/1f482c957598416085987e0b8f397594
2024-12-09T15:32:58,641 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/.tmp/info/5226a611ba7a48159ff87266aa018033 as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/info/5226a611ba7a48159ff87266aa018033
2024-12-09T15:32:58,650 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/info/5226a611ba7a48159ff87266aa018033, entries=10, sequenceid=11, filesize=6.9 K
2024-12-09T15:32:58,652 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/.tmp/ns/dc85f418df0b4884bae6072a8485f2fa as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/ns/dc85f418df0b4884bae6072a8485f2fa
2024-12-09T15:32:58,661 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/ns/dc85f418df0b4884bae6072a8485f2fa, entries=2, sequenceid=11, filesize=5.0 K
2024-12-09T15:32:58,662 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/.tmp/table/1f482c957598416085987e0b8f397594 as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/table/1f482c957598416085987e0b8f397594
2024-12-09T15:32:58,669 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/table/1f482c957598416085987e0b8f397594, entries=2, sequenceid=11, filesize=5.3 K
2024-12-09T15:32:58,670 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 676ms, sequenceid=11, compaction requested=false
2024-12-09T15:32:58,678 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-09T15:32:58,679 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:32:58,679 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T15:32:58,679 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740:
    Waiting for close lock at 1733758377993
    Running coprocessor pre-close hooks at 1733758377993
    Disabling compacts and flushes for region at 1733758377993
    Disabling writes for close at 1733758377994 (+1 ms)
    Obtaining lock to block concurrent updates at 1733758377994
    Preparing flush snapshotting stores in 1588230740 at 1733758377994
    Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733758377996 (+2 ms)
    Flushing stores of hbase:meta,,1.1588230740 at 1733758378015 (+19 ms)
    Flushing 1588230740/info: creating writer at 1733758378016 (+1 ms)
    Flushing 1588230740/info: appending metadata at 1733758378088 (+72 ms)
    Flushing 1588230740/info: closing flushed file at 1733758378088
    Flushing 1588230740/ns: creating writer at 1733758378566 (+478 ms)
    Flushing 1588230740/ns: appending metadata at 1733758378583 (+17 ms)
    Flushing 1588230740/ns: closing flushed file at 1733758378583
    Flushing 1588230740/table: creating writer at 1733758378602 (+19 ms)
    Flushing 1588230740/table: appending metadata at 1733758378624 (+22 ms)
    Flushing 1588230740/table: closing flushed file at 1733758378624
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f68104c: reopening flushed file at 1733758378640 (+16 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6db3c6e3: reopening flushed file at 1733758378651 (+11 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b2e6aa4: reopening flushed file at 1733758378661 (+10 ms)
    Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 676ms, sequenceid=11, compaction requested=false at 1733758378670 (+9 ms)
    Writing region close event to WAL at 1733758378672 (+2 ms)
    Running coprocessor post-close hooks at 1733758378679 (+7 ms)
    Closed at 1733758378679
2024-12-09T15:32:58,680 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T15:32:58,793 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(976): stopping server 3651d8ead0f3,46789,1733758282141; all regions closed.
2024-12-09T15:32:58,797 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,797 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,798 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,798 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,798 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741834_1010 (size=3066)
2024-12-09T15:32:58,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741834_1010 (size=3066)
2024-12-09T15:32:58,819 DEBUG [RS:0;3651d8ead0f3:46789 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs
2024-12-09T15:32:58,819 INFO [RS:0;3651d8ead0f3:46789 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C46789%2C1733758282141.meta:.meta(num 1733758284761)
2024-12-09T15:32:58,823 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,823 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,823 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,826 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,826 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:58,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741847_1023 (size=12695)
2024-12-09T15:32:58,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741847_1023 (size=12695)
2024-12-09T15:32:59,247 DEBUG [RS:0;3651d8ead0f3:46789 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/oldWALs
2024-12-09T15:32:59,247 INFO [RS:0;3651d8ead0f3:46789 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C46789%2C1733758282141:(num 1733758357895)
2024-12-09T15:32:59,247 DEBUG [RS:0;3651d8ead0f3:46789 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:32:59,247 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:32:59,248 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:32:59,248 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.ChoreService(370): Chore service for: regionserver/3651d8ead0f3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T15:32:59,248 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:32:59,249 INFO [RS:0;3651d8ead0f3:46789 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46789
2024-12-09T15:32:59,251 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T15:32:59,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T15:32:59,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3651d8ead0f3,46789,1733758282141
2024-12-09T15:32:59,256 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:32:59,259 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3651d8ead0f3,46789,1733758282141]
2024-12-09T15:32:59,264 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3651d8ead0f3,46789,1733758282141 already deleted, retry=false
2024-12-09T15:32:59,264 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3651d8ead0f3,46789,1733758282141 expired; onlineServers=0
2024-12-09T15:32:59,264 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3651d8ead0f3,39115,1733758281102' *****
2024-12-09T15:32:59,264 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T15:32:59,265 INFO [M:0;3651d8ead0f3:39115 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:32:59,265 INFO [M:0;3651d8ead0f3:39115 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:32:59,265 DEBUG [M:0;3651d8ead0f3:39115 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T15:32:59,265 DEBUG [M:0;3651d8ead0f3:39115 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T15:32:59,265 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T15:32:59,265 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758284024 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758284024,5,FailOnTimeoutGroup]
2024-12-09T15:32:59,266 INFO [M:0;3651d8ead0f3:39115 {}] hbase.ChoreService(370): Chore service for: master/3651d8ead0f3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T15:32:59,266 INFO [M:0;3651d8ead0f3:39115 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:32:59,266 DEBUG [M:0;3651d8ead0f3:39115 {}] master.HMaster(1795): Stopping service threads
2024-12-09T15:32:59,266 INFO [M:0;3651d8ead0f3:39115 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T15:32:59,266 INFO [M:0;3651d8ead0f3:39115 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:32:59,266 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758284023 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758284023,5,FailOnTimeoutGroup]
2024-12-09T15:32:59,267 INFO [M:0;3651d8ead0f3:39115 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T15:32:59,267 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T15:32:59,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T15:32:59,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:32:59,271 DEBUG [M:0;3651d8ead0f3:39115 {}] zookeeper.ZKUtil(347): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-09T15:32:59,271 WARN [M:0;3651d8ead0f3:39115 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-09T15:32:59,272 INFO [M:0;3651d8ead0f3:39115 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/.lastflushedseqids
2024-12-09T15:32:59,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741854_1030 (size=130)
2024-12-09T15:32:59,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741854_1030 (size=130)
2024-12-09T15:32:59,351 INFO [M:0;3651d8ead0f3:39115 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T15:32:59,351 INFO [M:0;3651d8ead0f3:39115 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T15:32:59,351 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T15:32:59,351 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:32:59,351 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:32:59,351 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T15:32:59,351 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:32:59,352 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB
2024-12-09T15:32:59,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:32:59,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46789-0x1012bcf72b10001, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:32:59,365 INFO [RS:0;3651d8ead0f3:46789 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:32:59,365 INFO [RS:0;3651d8ead0f3:46789 {}] regionserver.HRegionServer(1031): Exiting; stopping=3651d8ead0f3,46789,1733758282141; zookeeper connection closed.
2024-12-09T15:32:59,382 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@423aa74a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@423aa74a
2024-12-09T15:32:59,385 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T15:32:59,430 DEBUG [M:0;3651d8ead0f3:39115 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3286e56576764d2b9e9b4c2aa3fc6073 is 82, key is hbase:meta,,1/info:regioninfo/1733758284888/Put/seqid=0
2024-12-09T15:32:59,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741855_1031 (size=5672)
2024-12-09T15:32:59,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741855_1031 (size=5672)
2024-12-09T15:32:59,459 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3286e56576764d2b9e9b4c2aa3fc6073
2024-12-09T15:32:59,500 DEBUG [M:0;3651d8ead0f3:39115 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/41e655fad3a346f984edfc95fe2bbbdb is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733758286051/Put/seqid=0
2024-12-09T15:32:59,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741856_1032 (size=6248)
2024-12-09T15:32:59,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741856_1032 (size=6248)
2024-12-09T15:32:59,514 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/41e655fad3a346f984edfc95fe2bbbdb
2024-12-09T15:32:59,526 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 41e655fad3a346f984edfc95fe2bbbdb
2024-12-09T15:32:59,555 DEBUG [M:0;3651d8ead0f3:39115 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a5e0ffb6f71459ab505c9078a486394 is 69, key is 3651d8ead0f3,46789,1733758282141/rs:state/1733758284056/Put/seqid=0
2024-12-09T15:32:59,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741857_1033 (size=5156)
2024-12-09T15:32:59,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741857_1033 (size=5156)
2024-12-09T15:32:59,565 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a5e0ffb6f71459ab505c9078a486394
2024-12-09T15:32:59,609 DEBUG [M:0;3651d8ead0f3:39115 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cf6d3ea5af2c421c9c71aa957a7067de is 52, key is load_balancer_on/state:d/1733758285289/Put/seqid=0
2024-12-09T15:32:59,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741858_1034 (size=5056)
2024-12-09T15:32:59,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741858_1034 (size=5056)
2024-12-09T15:32:59,622 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cf6d3ea5af2c421c9c71aa957a7067de
2024-12-09T15:32:59,635 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3286e56576764d2b9e9b4c2aa3fc6073 as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3286e56576764d2b9e9b4c2aa3fc6073
2024-12-09T15:32:59,668 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3286e56576764d2b9e9b4c2aa3fc6073, entries=8, sequenceid=59, filesize=5.5 K
2024-12-09T15:32:59,676 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/41e655fad3a346f984edfc95fe2bbbdb as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/41e655fad3a346f984edfc95fe2bbbdb
2024-12-09T15:32:59,688 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 41e655fad3a346f984edfc95fe2bbbdb
2024-12-09T15:32:59,688 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/41e655fad3a346f984edfc95fe2bbbdb, entries=6, sequenceid=59, filesize=6.1 K
2024-12-09T15:32:59,696 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a5e0ffb6f71459ab505c9078a486394 as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a5e0ffb6f71459ab505c9078a486394
2024-12-09T15:32:59,705 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a5e0ffb6f71459ab505c9078a486394, entries=1, sequenceid=59, filesize=5.0 K
2024-12-09T15:32:59,706 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cf6d3ea5af2c421c9c71aa957a7067de as hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cf6d3ea5af2c421c9c71aa957a7067de
2024-12-09T15:32:59,714 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cf6d3ea5af2c421c9c71aa957a7067de, entries=1, sequenceid=59, filesize=4.9 K
2024-12-09T15:32:59,716 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 365ms, sequenceid=59, compaction requested=false
2024-12-09T15:32:59,718 INFO [M:0;3651d8ead0f3:39115 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:32:59,718 DEBUG [M:0;3651d8ead0f3:39115 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1733758379351
    Disabling compacts and flushes for region at 1733758379351
    Disabling writes for close at 1733758379351
    Obtaining lock to block concurrent updates at 1733758379352 (+1 ms)
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733758379352
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1733758379352
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733758379363 (+11 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733758379363
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733758379430 (+67 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733758379430
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733758379472 (+42 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733758379499 (+27 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733758379499
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733758379527 (+28 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733758379554 (+27 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733758379554
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733758379576 (+22 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733758379608 (+32 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733758379608
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c7d8d98: reopening flushed file at 1733758379633 (+25 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bc72e6f: reopening flushed file at 1733758379668 (+35 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a805012: reopening flushed file at 1733758379688 (+20 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19e7ab88: reopening flushed file at 1733758379705 (+17 ms)
    Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 365ms, sequenceid=59, compaction requested=false at 1733758379716 (+11 ms)
    Writing region close event to WAL at 1733758379718 (+2 ms)
    Closed at 1733758379718
2024-12-09T15:32:59,719 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:59,719 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:59,719 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:59,720 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:59,720 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:32:59,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741830_1006 (size=27985)
2024-12-09T15:32:59,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39699 is added to blk_1073741830_1006 (size=27985)
2024-12-09T15:32:59,724 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T15:32:59,724 INFO [M:0;3651d8ead0f3:39115 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T15:32:59,724 INFO [M:0;3651d8ead0f3:39115 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39115
2024-12-09T15:32:59,725 INFO [M:0;3651d8ead0f3:39115 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:32:59,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:32:59,828 INFO [M:0;3651d8ead0f3:39115 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:32:59,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39115-0x1012bcf72b10000, quorum=127.0.0.1:63849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:32:59,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:32:59,840 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:32:59,841 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:32:59,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:32:59,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.log.dir/,STOPPED}
2024-12-09T15:32:59,844 WARN [BP-1717592979-172.17.0.3-1733758277046 heartbeating to localhost/127.0.0.1:36359 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:32:59,844 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:32:59,844 WARN [BP-1717592979-172.17.0.3-1733758277046 heartbeating to localhost/127.0.0.1:36359 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1717592979-172.17.0.3-1733758277046 (Datanode Uuid aaa753a5-4082-463f-a236-20f2a4189eb7) service to localhost/127.0.0.1:36359
2024-12-09T15:32:59,844 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:32:59,846 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/data/data3/current/BP-1717592979-172.17.0.3-1733758277046 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:32:59,847 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/data/data4/current/BP-1717592979-172.17.0.3-1733758277046 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:32:59,847 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:32:59,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:32:59,851 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:32:59,852 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:32:59,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:32:59,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.log.dir/,STOPPED}
2024-12-09T15:32:59,854 WARN [BP-1717592979-172.17.0.3-1733758277046 heartbeating to localhost/127.0.0.1:36359 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:32:59,854 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:32:59,854 WARN [BP-1717592979-172.17.0.3-1733758277046 heartbeating to localhost/127.0.0.1:36359 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1717592979-172.17.0.3-1733758277046 (Datanode Uuid 5682fbff-95ff-4421-895e-e0029a532786) service to localhost/127.0.0.1:36359
2024-12-09T15:32:59,854 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:32:59,855 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/data/data1/current/BP-1717592979-172.17.0.3-1733758277046 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:32:59,855 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/cluster_478f6637-9cff-4513-356e-9157a91969ee/data/data2/current/BP-1717592979-172.17.0.3-1733758277046 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:32:59,856 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:32:59,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T15:32:59,874 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:32:59,874 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:32:59,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:32:59,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.log.dir/,STOPPED}
2024-12-09T15:32:59,885 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T15:32:59,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T15:32:59,965 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36359
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: regionserver/3651d8ead0f3:0.procedureResultReporter
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: Async-Client-Retry-Timer-pool-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-1-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-2-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: region-location-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: HBase-Metrics2-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-5-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: master/3651d8ead0f3:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-5
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36359 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: ForkJoinPool-2-worker-2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36359
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: HMaster-EventLoopGroup-1-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Timer for 'HBase' metrics system
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@786b8aa1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-4-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-2-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: SnapshotHandlerChoreCleaner
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-3-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:36359
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: Monitor thread for TaskMonitor
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RpcClient-timer-pool-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Time-limited test.named-queue-events-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: ForkJoinPool-2-worker-4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: weak-ref-cleaner-strictcontextstorage
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: master/3651d8ead0f3:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-5-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-4-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-2-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-4-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: SessionTracker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36359
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-3-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36359 from jenkins.hfs.0
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-1-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: SSL Certificates Store Monitor
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-3-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-5-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:36359
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36359 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
- Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=561 (was 666), ProcessCount=11 (was 11), AvailableMemoryMB=7649 (was 7033) - AvailableMemoryMB LEAK? -
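The before/after lines emitted by hbase.ResourceChecker bracket each test method: counters such as live threads and open file descriptors are sampled before the test, sampled again afterwards, and printed as "now (was before)" with a LEAK? marker when the value grew. A minimal Java sketch of that accounting follows; the class and method names (ResourceSnapshot, before, after) are assumptions for illustration, not the real ResourceChecker API.

// Sketch of before/after resource accounting around a test method.
import java.lang.management.ManagementFactory;

public class ResourceSnapshot {
    private int threadsBefore;

    public void before() {
        // Sample the live-thread count before the test runs.
        threadsBefore = ManagementFactory.getThreadMXBean().getThreadCount();
        System.out.printf("before: Thread=%d%n", threadsBefore);
    }

    public void after() {
        // Sample again after the test and flag growth as a potential leak,
        // in the same "now (was before)" shape as the log above.
        int threadsAfter = ManagementFactory.getThreadMXBean().getThreadCount();
        System.out.printf("after: Thread=%d (was %d)%s%n",
            threadsAfter, threadsBefore,
            threadsAfter > threadsBefore ? " - Thread LEAK? -" : "");
    }
}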
2024-12-09T15:32:59,975 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=561, ProcessCount=11, AvailableMemoryMB=7647
2024-12-09T15:32:59,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T15:32:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.log.dir so I do NOT create it in target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4
2024-12-09T15:32:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4eaec47b-b7c6-5429-0564-fb1af583c2ab/hadoop.tmp.dir so I do NOT create it in target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4
2024-12-09T15:32:59,976 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb, deleteOnExit=true
2024-12-09T15:32:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T15:32:59,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/test.cache.data in system properties and HBase conf
2024-12-09T15:32:59,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T15:32:59,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.log.dir in system properties and HBase conf
2024-12-09T15:32:59,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T15:32:59,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T15:32:59,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T15:32:59,977 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T15:32:59,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-09T15:32:59,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-09T15:32:59,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-09T15:32:59,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T15:32:59,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-09T15:32:59,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-09T15:32:59,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T15:32:59,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T15:32:59,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-09T15:32:59,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/nfs.dump.dir in system properties and HBase conf
2024-12-09T15:32:59,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/java.io.tmpdir in system properties and HBase conf
2024-12-09T15:32:59,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T15:32:59,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-09T15:32:59,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-09T15:33:00,000 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T15:33:00,103 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:33:00,110 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:33:00,123 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:33:00,123 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:33:00,124 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:33:00,125 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:33:00,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b5fac92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:33:00,136 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b8bf96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:33:00,336 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4e15a6d0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/java.io.tmpdir/jetty-localhost-45429-hadoop-hdfs-3_4_1-tests_jar-_-any-17943696312502130162/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T15:33:00,339 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2152d149{HTTP/1.1, (http/1.1)}{localhost:45429}
2024-12-09T15:33:00,340 INFO [Time-limited test {}] server.Server(415): Started @105991ms
2024-12-09T15:33:00,359 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T15:33:00,587 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:33:00,596 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:33:00,599 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:33:00,599 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:33:00,599 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:33:00,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@779c0b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:33:00,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61783b0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:33:00,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36505daf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/java.io.tmpdir/jetty-localhost-40431-hadoop-hdfs-3_4_1-tests_jar-_-any-12729926226292352264/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:33:00,768 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21e00560{HTTP/1.1, (http/1.1)}{localhost:40431}
2024-12-09T15:33:00,768 INFO [Time-limited test {}] server.Server(415): Started @106420ms
2024-12-09T15:33:00,771 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T15:33:00,835 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:33:00,840 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:33:00,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:33:00,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:33:00,841 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:33:00,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1410bc86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:33:00,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6082dc4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:33:00,924 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/data/data1/current/BP-2001731650-172.17.0.3-1733758380021/current, will proceed with Du for space computation calculation,
2024-12-09T15:33:00,925 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/data/data2/current/BP-2001731650-172.17.0.3-1733758380021/current, will proceed with Du for space computation calculation,
2024-12-09T15:33:00,952 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-12-09T15:33:00,956 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf290c5fa41ae626 with lease ID 0x23678dfab14e963c: Processing first storage report for DS-82fd7601-2520-4e69-be3b-5d58ff94fd9c from datanode DatanodeRegistration(127.0.0.1:38353, datanodeUuid=ea9211a5-d4a3-45ed-aae6-241df7285a02, infoPort=33463, infoSecurePort=0, ipcPort=41823, storageInfo=lv=-57;cid=testClusterID;nsid=1218516143;c=1733758380021) 2024-12-09T15:33:00,956 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf290c5fa41ae626 with lease ID 0x23678dfab14e963c: from storage DS-82fd7601-2520-4e69-be3b-5d58ff94fd9c node DatanodeRegistration(127.0.0.1:38353, datanodeUuid=ea9211a5-d4a3-45ed-aae6-241df7285a02, infoPort=33463, infoSecurePort=0, ipcPort=41823, storageInfo=lv=-57;cid=testClusterID;nsid=1218516143;c=1733758380021), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:33:00,956 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf290c5fa41ae626 with lease ID 0x23678dfab14e963c: Processing first storage report for DS-af22bc3b-67de-4b42-8317-a213f30fc289 from datanode DatanodeRegistration(127.0.0.1:38353, datanodeUuid=ea9211a5-d4a3-45ed-aae6-241df7285a02, infoPort=33463, infoSecurePort=0, ipcPort=41823, storageInfo=lv=-57;cid=testClusterID;nsid=1218516143;c=1733758380021) 2024-12-09T15:33:00,957 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf290c5fa41ae626 with lease ID 0x23678dfab14e963c: from storage DS-af22bc3b-67de-4b42-8317-a213f30fc289 node DatanodeRegistration(127.0.0.1:38353, datanodeUuid=ea9211a5-d4a3-45ed-aae6-241df7285a02, infoPort=33463, infoSecurePort=0, ipcPort=41823, storageInfo=lv=-57;cid=testClusterID;nsid=1218516143;c=1733758380021), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:33:00,987 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@af33574{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/java.io.tmpdir/jetty-localhost-42405-hadoop-hdfs-3_4_1-tests_jar-_-any-7914164995504147767/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:33:00,987 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17d00685{HTTP/1.1, (http/1.1)}{localhost:42405} 2024-12-09T15:33:00,988 INFO [Time-limited test {}] server.Server(415): Started @106639ms 2024-12-09T15:33:00,990 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
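The records above trace `HBaseTestingUtil` pushing per-test paths into system properties and the HBase conf, then booting a mini HDFS (NameNode and DataNodes behind Jetty). As a minimal sketch — not part of the captured log — this is the usual shape of a test that drives that utility; class and method names follow the branch-3 `HBaseTestingUtil` API, and the exact signatures should be treated as assumptions here:

```java
// Editorial sketch: a bare-bones driver for the mini cluster whose startup is
// traced above. Assumes the branch-3 HBaseTestingUtil API.
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSmokeTest {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // startMiniCluster() produces the sequence logged above: per-test paths go
    // into system properties and the HBase conf, then a MiniDFSCluster
    // (NameNode + DataNodes behind Jetty), a MiniZooKeeperCluster, an HMaster
    // and one RegionServer come up under the test-data directory.
    util.startMiniCluster();
    try {
      // "hbase.rootdir" is the key the log shows being set for the test run.
      System.out.println("hbase.rootdir = " + util.getConfiguration().get("hbase.rootdir"));
    } finally {
      util.shutdownMiniCluster(); // tears down RS, master, ZK and DFS again
    }
  }
}
```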
2024-12-09T15:33:01,134 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/data/data3/current/BP-2001731650-172.17.0.3-1733758380021/current, will proceed with Du for space computation calculation, 2024-12-09T15:33:01,151 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/data/data4/current/BP-2001731650-172.17.0.3-1733758380021/current, will proceed with Du for space computation calculation, 2024-12-09T15:33:01,197 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T15:33:01,204 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbdeb93deee858f with lease ID 0x23678dfab14e963d: Processing first storage report for DS-b0ed277a-da79-44f5-96c6-4e3b09e37a1b from datanode DatanodeRegistration(127.0.0.1:42025, datanodeUuid=a86ae306-d613-47ef-b678-318ce0aaf2ae, infoPort=33175, infoSecurePort=0, ipcPort=41067, storageInfo=lv=-57;cid=testClusterID;nsid=1218516143;c=1733758380021) 2024-12-09T15:33:01,204 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbdeb93deee858f with lease ID 0x23678dfab14e963d: from storage DS-b0ed277a-da79-44f5-96c6-4e3b09e37a1b node DatanodeRegistration(127.0.0.1:42025, datanodeUuid=a86ae306-d613-47ef-b678-318ce0aaf2ae, infoPort=33175, infoSecurePort=0, ipcPort=41067, storageInfo=lv=-57;cid=testClusterID;nsid=1218516143;c=1733758380021), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:33:01,204 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbdeb93deee858f with lease ID 0x23678dfab14e963d: Processing first storage report for DS-e3148949-caa1-4a02-bb46-8e330bca5218 from datanode DatanodeRegistration(127.0.0.1:42025, datanodeUuid=a86ae306-d613-47ef-b678-318ce0aaf2ae, infoPort=33175, infoSecurePort=0, ipcPort=41067, storageInfo=lv=-57;cid=testClusterID;nsid=1218516143;c=1733758380021) 2024-12-09T15:33:01,204 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbdeb93deee858f with lease ID 0x23678dfab14e963d: from storage DS-e3148949-caa1-4a02-bb46-8e330bca5218 node DatanodeRegistration(127.0.0.1:42025, datanodeUuid=a86ae306-d613-47ef-b678-318ce0aaf2ae, infoPort=33175, infoSecurePort=0, ipcPort=41067, storageInfo=lv=-57;cid=testClusterID;nsid=1218516143;c=1733758380021), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:33:01,234 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4 2024-12-09T15:33:01,255 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/zookeeper_0, clientPort=58671, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T15:33:01,267 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58671 2024-12-09T15:33:01,268 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:01,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:01,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741825_1001 (size=7) 2024-12-09T15:33:01,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741825_1001 (size=7) 2024-12-09T15:33:01,316 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09 with version=8 2024-12-09T15:33:01,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/hbase-staging 2024-12-09T15:33:01,319 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3651d8ead0f3:0 server-side Connection retries=45 2024-12-09T15:33:01,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:01,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:01,320 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T15:33:01,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:01,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T15:33:01,320 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T15:33:01,320 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T15:33:01,323 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34463 2024-12-09T15:33:01,324 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34463 connecting to ZooKeeper ensemble=127.0.0.1:58671 2024-12-09T15:33:01,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:344630x0, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T15:33:01,337 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34463-0x1012bd0fce30000 connected 2024-12-09T15:33:01,361 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:01,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:01,373 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:33:01,373 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09, hbase.cluster.distributed=false 2024-12-09T15:33:01,380 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T15:33:01,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34463 2024-12-09T15:33:01,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34463 2024-12-09T15:33:01,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34463 2024-12-09T15:33:01,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34463 2024-12-09T15:33:01,390 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34463 2024-12-09T15:33:01,416 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3651d8ead0f3:0 server-side Connection retries=45 2024-12-09T15:33:01,416 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:01,416 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:01,416 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T15:33:01,416 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:01,416 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T15:33:01,416 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T15:33:01,416 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T15:33:01,417 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41349 2024-12-09T15:33:01,419 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41349 connecting to ZooKeeper ensemble=127.0.0.1:58671 2024-12-09T15:33:01,420 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:01,424 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:01,436 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:413490x0, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:33:01,437 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T15:33:01,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413490x0, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T15:33:01,454 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T15:33:01,457 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:413490x0, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T15:33:01,463 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41349-0x1012bd0fce30001 connected 2024-12-09T15:33:01,475 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T15:33:01,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41349 2024-12-09T15:33:01,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41349 2024-12-09T15:33:01,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41349 2024-12-09T15:33:01,518 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41349 2024-12-09T15:33:01,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41349 2024-12-09T15:33:01,542 DEBUG 
[M:0;3651d8ead0f3:34463 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3651d8ead0f3:34463 2024-12-09T15:33:01,543 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3651d8ead0f3,34463,1733758381319 2024-12-09T15:33:01,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:33:01,548 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3651d8ead0f3,34463,1733758381319 2024-12-09T15:33:01,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:33:01,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:01,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T15:33:01,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:01,559 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T15:33:01,560 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3651d8ead0f3,34463,1733758381319 from backup master directory 2024-12-09T15:33:01,567 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
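The `RpcExecutor` lines above describe each executor as a bounded `java.util.concurrent.LinkedBlockingQueue` (`maxQueueLength=30`) drained by `handlerCount` threads. The following is a simplified, purely illustrative sketch of that queue-plus-handlers shape; the real `NettyRpcServer` executors additionally do priority routing, read/write queue splits and metrics:

```java
// Editorial sketch of the FIFO call-queue pattern the RpcExecutor log lines
// describe: one bounded queue, a fixed pool of handler threads draining it.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class FifoCallQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30); // maxQueueLength=30
    int handlerCount = 3;                                              // handlerCount=3
    for (int i = 0; i < handlerCount; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (true) {
            callQueue.take().run(); // FIFO: calls are handled in arrival order
          }
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        }
      }, "default.FPBQ.Fifo.handler=" + i);
      handler.setDaemon(true);
      handler.start();
    }
    // offer() fails fast once the queue is full; a bounded call queue is how
    // the server applies backpressure to RPC clients instead of buffering
    // without limit.
    boolean accepted = callQueue.offer(() -> System.out.println("handled call"));
    System.out.println("accepted=" + accepted);
    Thread.sleep(200); // let a daemon handler drain the call before exit
  }
}
```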
2024-12-09T15:33:01,567 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3651d8ead0f3,34463,1733758381319 2024-12-09T15:33:01,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:33:01,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3651d8ead0f3,34463,1733758381319 2024-12-09T15:33:01,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:33:01,584 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/hbase.id] with ID: 9da324c3-ce31-46b0-b3d6-297e08e5bbef 2024-12-09T15:33:01,584 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/.tmp/hbase.id 2024-12-09T15:33:01,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741826_1002 (size=42) 2024-12-09T15:33:01,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741826_1002 (size=42) 2024-12-09T15:33:01,597 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/.tmp/hbase.id]:[hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/hbase.id] 2024-12-09T15:33:01,619 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:01,619 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T15:33:01,624 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 4ms. 
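The `FSUtils` records above narrate a write-to-temporary-then-rename publish of the cluster ID file (`.tmp/hbase.id` moved to `hbase.id`). Below is a sketch of that pattern with the stock Hadoop `FileSystem` API; the paths and the `writeUTF` payload are illustrative (HBase serializes a ClusterId record, not a UTF string), and `fs.defaultFS` would need to point at the test HDFS to mirror the log:

```java
// Editorial sketch of the .tmp-then-rename publish used above for hbase.id.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AtomicIdFilePublish {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/hbase/.tmp/hbase.id"); // staging location (illustrative)
    Path dst = new Path("/hbase/hbase.id");      // final location (illustrative)
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      // Cluster ID taken from the log; the real file format is HBase's own.
      out.writeUTF("9da324c3-ce31-46b0-b3d6-297e08e5bbef");
    }
    fs.mkdirs(dst.getParent());
    // HDFS rename is atomic within one filesystem, so readers observe either
    // no hbase.id or a complete one -- never a half-written file.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("failed to publish " + dst);
    }
  }
}
```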
2024-12-09T15:33:01,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:01,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:01,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741827_1003 (size=196) 2024-12-09T15:33:01,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741827_1003 (size=196) 2024-12-09T15:33:01,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T15:33:01,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T15:33:01,691 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T15:33:01,692 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T15:33:02,053 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T15:33:02,054 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T15:33:02,058 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class 
org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:33:02,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741828_1004 (size=1189) 2024-12-09T15:33:02,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741828_1004 (size=1189) 2024-12-09T15:33:02,523 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store 2024-12-09T15:33:02,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741829_1005 (size=34) 2024-12-09T15:33:02,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741829_1005 (size=34) 2024-12-09T15:33:02,965 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:33:02,965 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T15:33:02,965 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:02,965 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:02,965 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-09T15:33:02,965 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:02,965 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:02,966 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758382965Disabling compacts and flushes for region at 1733758382965Disabling writes for close at 1733758382965Writing region close event to WAL at 1733758382965Closed at 1733758382965 2024-12-09T15:33:02,968 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/.initializing 2024-12-09T15:33:02,968 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/WALs/3651d8ead0f3,34463,1733758381319 2024-12-09T15:33:02,972 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C34463%2C1733758381319, suffix=, logDir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/WALs/3651d8ead0f3,34463,1733758381319, archiveDir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/oldWALs, maxLogs=10 2024-12-09T15:33:02,973 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C34463%2C1733758381319.1733758382973 2024-12-09T15:33:03,004 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/WALs/3651d8ead0f3,34463,1733758381319/3651d8ead0f3%2C34463%2C1733758381319.1733758382973 2024-12-09T15:33:03,012 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33175:33175),(127.0.0.1/127.0.0.1:33463:33463)] 2024-12-09T15:33:03,030 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:33:03,031 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:33:03,031 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,031 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T15:33:03,048 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:03,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:03,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T15:33:03,059 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:03,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:33:03,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T15:33:03,065 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:03,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:33:03,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T15:33:03,071 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:03,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:33:03,072 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,075 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,076 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,078 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,079 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,079 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T15:33:03,081 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:03,087 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:33:03,088 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788747, jitterRate=0.0029441267251968384}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T15:33:03,089 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733758383031Initializing all the Stores at 1733758383039 (+8 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758383040 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758383042 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758383042Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758383042Cleaning up temporary data from old regions at 1733758383079 (+37 ms)Region opened successfully at 1733758383089 (+10 ms) 2024-12-09T15:33:03,104 INFO 
[master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T15:33:03,112 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78c94276, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:33:03,114 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T15:33:03,114 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T15:33:03,114 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T15:33:03,115 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T15:33:03,116 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T15:33:03,116 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T15:33:03,116 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T15:33:03,122 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
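The `CompactionConfiguration` dumps above list `ratio 1.200000` and file-count bounds `[minFilesToCompact:3, maxFilesToCompact:10)`. As a simplified illustration — not the production code — the size-ratio admission rule commonly described for HBase's `ExploringCompactionPolicy` accepts a candidate set only when no single file exceeds the ratio times the combined size of the others:

```java
// Editorial sketch of the size-ratio test behind the compaction settings
// dumped above. Simplified: the real policy also enforces file-count and
// total-size bounds and scores competing candidate sets.
import java.util.List;

public class CompactionRatioCheck {
  static boolean isBalanced(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates; rewriting it again mostly wastes IO
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(isBalanced(List.of(10L, 12L, 11L), 1.2)); // true: similar sizes
    System.out.println(isBalanced(List.of(100L, 5L, 6L), 1.2));  // false: 100 dominates
  }
}
```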
2024-12-09T15:33:03,124 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T15:33:03,126 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T15:33:03,127 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T15:33:03,128 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T15:33:03,130 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T15:33:03,130 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T15:33:03,135 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T15:33:03,143 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T15:33:03,151 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T15:33:03,160 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T15:33:03,179 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T15:33:03,186 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T15:33:03,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:33:03,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:03,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:33:03,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T15:33:03,210 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3651d8ead0f3,34463,1733758381319, sessionid=0x1012bd0fce30000, setting cluster-up flag (Was=false) 2024-12-09T15:33:03,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:03,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:03,286 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T15:33:03,313 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,34463,1733758381319 2024-12-09T15:33:03,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:03,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:03,328 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T15:33:03,332 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,34463,1733758381319 2024-12-09T15:33:03,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:03,342 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T15:33:03,351 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T15:33:03,351 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T15:33:03,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:03,351 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T15:33:03,352 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3651d8ead0f3,34463,1733758381319 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T15:33:03,359 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:33:03,359 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:33:03,359 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:33:03,360 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:33:03,360 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3651d8ead0f3:0, corePoolSize=10, maxPoolSize=10 2024-12-09T15:33:03,360 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,360 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:33:03,360 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,400 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:33:03,400 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T15:33:03,401 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733758413401 2024-12-09T15:33:03,401 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): 
log_cleaner Cleaner pool size is 1 2024-12-09T15:33:03,401 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T15:33:03,401 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T15:33:03,401 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T15:33:03,402 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T15:33:03,402 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T15:33:03,408 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,411 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:03,411 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T15:33:03,412 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T15:33:03,413 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T15:33:03,413 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T15:33:03,414 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] 
cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T15:33:03,414 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T15:33:03,417 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758383415,5,FailOnTimeoutGroup] 2024-12-09T15:33:03,417 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758383417,5,FailOnTimeoutGroup] 2024-12-09T15:33:03,417 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,418 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T15:33:03,418 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,418 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,441 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(746): ClusterId : 9da324c3-ce31-46b0-b3d6-297e08e5bbef 2024-12-09T15:33:03,441 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T15:33:03,444 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T15:33:03,444 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T15:33:03,448 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T15:33:03,449 DEBUG [RS:0;3651d8ead0f3:41349 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@196aed08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:33:03,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:33:03,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:33:03,476 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T15:33:03,476 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => 
'|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09 2024-12-09T15:33:03,514 DEBUG [RS:0;3651d8ead0f3:41349 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3651d8ead0f3:41349 2024-12-09T15:33:03,514 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T15:33:03,514 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T15:33:03,514 DEBUG [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(832): About to register with Master. 
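[editor's note] The descriptor dump above spells out each column family's settings (VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING, BLOCKSIZE). For reference, an equivalent descriptor is assembled client-side through the builder API; the sketch below is illustrative only — the table name "demo" and the single "info" family are assumptions, not taken from this run:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family as printed in the log: VERSIONS=3,
    // BLOOMFILTER=ROWCOL, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1,
    // BLOCKSIZE=8192.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))  // hypothetical table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build())
        .build();
    System.out.println(td);
  }
}
```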
2024-12-09T15:33:03,516 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(2659): reportForDuty to master=3651d8ead0f3,34463,1733758381319 with port=41349, startcode=1733758381415 2024-12-09T15:33:03,517 DEBUG [RS:0;3651d8ead0f3:41349 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T15:33:03,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:33:03,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:33:03,545 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:33:03,560 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49193, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T15:33:03,561 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34463 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3651d8ead0f3,41349,1733758381415 2024-12-09T15:33:03,561 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34463 {}] master.ServerManager(517): Registering regionserver=3651d8ead0f3,41349,1733758381415 2024-12-09T15:33:03,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:33:03,579 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:33:03,579 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:03,586 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:03,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:33:03,596 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:33:03,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:03,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:03,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:33:03,604 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:33:03,604 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:03,605 DEBUG [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09 2024-12-09T15:33:03,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:03,605 DEBUG [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33363 2024-12-09T15:33:03,605 DEBUG [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T15:33:03,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:33:03,611 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T15:33:03,619 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:33:03,619 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:03,620 DEBUG [RS:0;3651d8ead0f3:41349 {}] zookeeper.ZKUtil(111): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3651d8ead0f3,41349,1733758381415 2024-12-09T15:33:03,620 WARN [RS:0;3651d8ead0f3:41349 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T15:33:03,620 INFO [RS:0;3651d8ead0f3:41349 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:33:03,620 DEBUG [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/WALs/3651d8ead0f3,41349,1733758381415 2024-12-09T15:33:03,621 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3651d8ead0f3,41349,1733758381415] 2024-12-09T15:33:03,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:03,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:33:03,623 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740 2024-12-09T15:33:03,624 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740 2024-12-09T15:33:03,627 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T15:33:03,627 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:33:03,628 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
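[editor's note] The recurring ZKWatcher lines above are plain ZooKeeper watch notifications (NodeChildrenChanged, NodeDataChanged) delivered on the client event thread; HBase re-registers its watches after each delivery. A minimal stand-alone watcher, for readers unfamiliar with the mechanism, looks like the sketch below — the connect string reuses the quorum address from this log, everything else is illustrative:

```java
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Same quorum address the test cluster logged: 127.0.0.1:58671.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58671", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // One-shot children watch on /hbase; fires once per change, after which
    // a real client (like ZKWatcher) must re-register it.
    List<String> children = zk.getChildren("/hbase", event ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath()));
    System.out.println("children=" + children);
    Thread.sleep(10_000);  // wait briefly for a notification, then exit
    zk.close();
  }
}
```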
2024-12-09T15:33:03,631 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:33:03,638 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:33:03,640 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T15:33:03,646 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T15:33:03,647 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=839446, jitterRate=0.06741155683994293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:33:03,648 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733758383545Initializing all the Stores at 1733758383552 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758383552Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758383567 (+15 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758383567Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758383567Cleaning up temporary data from old regions at 1733758383627 (+60 ms)Region opened successfully at 1733758383648 (+21 ms) 2024-12-09T15:33:03,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T15:33:03,649 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T15:33:03,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T15:33:03,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T15:33:03,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T15:33:03,663 INFO 
[RS:0;3651d8ead0f3:41349 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T15:33:03,664 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,679 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T15:33:03,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758383648Disabling compacts and flushes for region at 1733758383648Disabling writes for close at 1733758383649 (+1 ms)Writing region close event to WAL at 1733758383679 (+30 ms)Closed at 1733758383679 2024-12-09T15:33:03,681 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:33:03,682 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T15:33:03,682 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T15:33:03,682 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T15:33:03,685 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T15:33:03,688 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T15:33:03,688 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
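[editor's note] The PressureAwareCompactionThroughputController line above reports a 50–100 MB/s throttling window with a 60 s tuning period. Those bounds are configurable; the key names in the sketch below are quoted from memory of the standard compaction-throughput settings, so verify them against your HBase version before relying on them:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key names; the values match the defaults printed in this log
    // (higher bound 100 MB/s, lower bound 50 MB/s).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
  }
}
```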
2024-12-09T15:33:03,688 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,688 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,688 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:03,689 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:33:03,690 DEBUG [RS:0;3651d8ead0f3:41349 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:33:03,690 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T15:33:03,700 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,700 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
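[editor's note] Each ExecutorService line above announces a named worker pool with fixed core and max sizes (e.g. RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1). Independent of any HBase API, the plain JDK analogue of one such pool is:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolSketch {
  public static void main(String[] args) {
    // corePoolSize=1, maxPoolSize=1, as logged for RS_OPEN_REGION above;
    // excess tasks queue up rather than spawning extra threads.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    pool.execute(() -> System.out.println("open-region task"));
    pool.shutdown();
  }
}
```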
2024-12-09T15:33:03,700 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,700 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,700 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,700 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,41349,1733758381415-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T15:33:03,722 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T15:33:03,722 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,41349,1733758381415-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,722 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,723 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.Replication(171): 3651d8ead0f3,41349,1733758381415 started 2024-12-09T15:33:03,739 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:03,740 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1482): Serving as 3651d8ead0f3,41349,1733758381415, RpcServer on 3651d8ead0f3/172.17.0.3:41349, sessionid=0x1012bd0fce30001 2024-12-09T15:33:03,740 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T15:33:03,740 DEBUG [RS:0;3651d8ead0f3:41349 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3651d8ead0f3,41349,1733758381415 2024-12-09T15:33:03,740 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,41349,1733758381415' 2024-12-09T15:33:03,740 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T15:33:03,741 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T15:33:03,743 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T15:33:03,743 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T15:33:03,743 DEBUG [RS:0;3651d8ead0f3:41349 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3651d8ead0f3,41349,1733758381415 2024-12-09T15:33:03,743 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,41349,1733758381415' 2024-12-09T15:33:03,743 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T15:33:03,749 DEBUG [RS:0;3651d8ead0f3:41349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T15:33:03,750 DEBUG 
[RS:0;3651d8ead0f3:41349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T15:33:03,750 INFO [RS:0;3651d8ead0f3:41349 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T15:33:03,750 INFO [RS:0;3651d8ead0f3:41349 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T15:33:03,841 WARN [3651d8ead0f3:34463 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T15:33:03,853 INFO [RS:0;3651d8ead0f3:41349 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C41349%2C1733758381415, suffix=, logDir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/WALs/3651d8ead0f3,41349,1733758381415, archiveDir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/oldWALs, maxLogs=32 2024-12-09T15:33:03,856 INFO [RS:0;3651d8ead0f3:41349 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C41349%2C1733758381415.1733758383855 2024-12-09T15:33:03,868 INFO [RS:0;3651d8ead0f3:41349 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/WALs/3651d8ead0f3,41349,1733758381415/3651d8ead0f3%2C41349%2C1733758381415.1733758383855 2024-12-09T15:33:03,873 DEBUG [RS:0;3651d8ead0f3:41349 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33463:33463),(127.0.0.1/127.0.0.1:33175:33175)] 2024-12-09T15:33:03,895 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T15:33:03,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:03,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:04,091 DEBUG [3651d8ead0f3:34463 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T15:33:04,092 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3651d8ead0f3,41349,1733758381415 2024-12-09T15:33:04,094 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,41349,1733758381415, state=OPENING 2024-12-09T15:33:04,097 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T15:33:04,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:04,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:04,100 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:33:04,100 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:33:04,100 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T15:33:04,101 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,41349,1733758381415}] 2024-12-09T15:33:04,257 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T15:33:04,261 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47767, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T15:33:04,271 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T15:33:04,271 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:33:04,274 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C41349%2C1733758381415.meta, suffix=.meta, logDir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/WALs/3651d8ead0f3,41349,1733758381415, archiveDir=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/oldWALs, maxLogs=32 2024-12-09T15:33:04,277 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 
3651d8ead0f3%2C41349%2C1733758381415.meta.1733758384276.meta 2024-12-09T15:33:04,302 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/WALs/3651d8ead0f3,41349,1733758381415/3651d8ead0f3%2C41349%2C1733758381415.meta.1733758384276.meta 2024-12-09T15:33:04,306 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33175:33175),(127.0.0.1/127.0.0.1:33463:33463)] 2024-12-09T15:33:04,315 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:33:04,315 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T15:33:04,316 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T15:33:04,316 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T15:33:04,316 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T15:33:04,316 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:33:04,316 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T15:33:04,316 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T15:33:04,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:33:04,333 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 
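[editor's note] The CompactionConfiguration dump above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000) corresponds to the well-known hbase.hstore.compaction.* file-selection knobs. A short sketch of overriding them, with the defaults from this log as the example values:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);    // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);   // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}
```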
2024-12-09T15:33:04,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:04,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:04,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:33:04,336 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:33:04,336 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:04,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:04,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:33:04,338 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:33:04,338 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:04,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:04,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:33:04,343 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:33:04,343 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:04,344 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:04,344 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:33:04,346 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740 2024-12-09T15:33:04,348 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740 2024-12-09T15:33:04,350 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T15:33:04,350 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:33:04,351 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
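[editor's note] The FlushLargeStoresPolicy line above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset on hbase:meta, so the region falls back to the memstore flush size divided by the number of families — 16 MB here, consistent with this region's four families (info, ns, rep_barrier, table). The message implies the property is read from the table descriptor; if you did want to pin it, a hedged sketch (hypothetical table and family) would be:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))  // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f1")))
        // Key name copied from the log line above; value is in bytes (16 MB).
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td);
  }
}
```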
2024-12-09T15:33:04,353 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:33:04,355 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763425, jitterRate=-0.029255107045173645}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:33:04,355 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T15:33:04,356 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733758384317Writing region info on filesystem at 1733758384317Initializing all the Stores at 1733758384319 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758384319Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758384330 (+11 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758384330Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758384330Cleaning up temporary data from old regions at 1733758384350 (+20 ms)Running coprocessor post-open hooks at 1733758384355 (+5 ms)Region opened successfully at 1733758384356 (+1 ms) 2024-12-09T15:33:04,358 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733758384257 2024-12-09T15:33:04,373 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T15:33:04,374 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,41349,1733758381415 2024-12-09T15:33:04,374 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T15:33:04,376 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,41349,1733758381415, state=OPEN 2024-12-09T15:33:04,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:33:04,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:33:04,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:33:04,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:33:04,383 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3651d8ead0f3,41349,1733758381415 2024-12-09T15:33:04,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T15:33:04,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,41349,1733758381415 in 282 msec 2024-12-09T15:33:04,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T15:33:04,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 712 msec 2024-12-09T15:33:04,400 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:33:04,400 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T15:33:04,410 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T15:33:04,410 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,41349,1733758381415, seqNum=-1] 2024-12-09T15:33:04,410 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T15:33:04,415 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59609, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T15:33:04,451 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1040 sec 2024-12-09T15:33:04,451 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733758384451, completionTime=-1 2024-12-09T15:33:04,451 INFO 
[master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T15:33:04,451 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T15:33:04,455 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T15:33:04,455 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733758444455 2024-12-09T15:33:04,455 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733758504455 2024-12-09T15:33:04,455 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-09T15:33:04,456 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,34463,1733758381319-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:04,456 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,34463,1733758381319-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:04,456 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,34463,1733758381319-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:04,456 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3651d8ead0f3:34463, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:04,456 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:04,464 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:04,466 DEBUG [master/3651d8ead0f3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T15:33:04,473 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.905sec 2024-12-09T15:33:04,473 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T15:33:04,473 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T15:33:04,473 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T15:33:04,473 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
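[editor's note] From 15:33:04,547 onward the test-side client bootstraps: it requests the cluster id from the master's connection registry, then resolves the hbase:meta region location before issuing ClientService calls. A stand-alone client performs the same steps implicitly; the sketch below reuses the quorum address from this log and assumes a ZooKeeper-based connection registry — depending on the registry in use, the bootstrap endpoint configuration differs:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ClientBootstrapSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // from this log
    conf.set("hbase.zookeeper.property.clientPort", "58671");  // from this log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves the same location the log prints:
      // [region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1]
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
      System.out.println("meta location: " + loc);
    }
  }
}
```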
2024-12-09T15:33:04,474 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-09T15:33:04,474 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,34463,1733758381319-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T15:33:04,474 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,34463,1733758381319-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-09T15:33:04,504 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-12-09T15:33:04,504 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-09T15:33:04,504 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,34463,1733758381319-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:33:04,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@222181c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:33:04,547 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3651d8ead0f3,34463,-1 for getting cluster id
2024-12-09T15:33:04,547 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-09T15:33:04,561 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9da324c3-ce31-46b0-b3d6-297e08e5bbef'
2024-12-09T15:33:04,563 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-09T15:33:04,564 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9da324c3-ce31-46b0-b3d6-297e08e5bbef"
2024-12-09T15:33:04,564 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f2a6128, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:33:04,564 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3651d8ead0f3,34463,-1]
2024-12-09T15:33:04,565 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-09T15:33:04,570 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:04,575 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51604, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-09T15:33:04,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6512930c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:33:04,580 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T15:33:04,587 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,41349,1733758381415, seqNum=-1]
2024-12-09T15:33:04,587 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T15:33:04,601 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36226, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T15:33:04,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3651d8ead0f3,34463,1733758381319
2024-12-09T15:33:04,607 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:33:04,620 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-12-09T15:33:04,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T15:33:04,621 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T15:33:04,621 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:568)
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:33:04,621 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:04,621 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:04,621 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T15:33:04,622 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=274994465, stopped=false
2024-12-09T15:33:04,622 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3651d8ead0f3,34463,1733758381319
2024-12-09T15:33:04,622 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T15:33:04,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:33:04,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:33:04,631 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:33:04,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:33:04,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:33:04,635 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:33:04,635 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
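The call stack above traces the shutdown from the test's JUnit teardown (AbstractTestLogRolling.tearDown) into HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that lifecycle, assuming only the HBaseTestingUtil methods visible in this log (class name and test body are placeholders):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up DFS, ZooKeeper, a master, and a region server in-process.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Produces the connection-close and shutdown records seen in this log.
    testUtil.shutdownMiniCluster();
  }

  @Test
  public void testSomething() throws Exception {
    // test body elided
  }
}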
2024-12-09T15:33:04,636 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:568)
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:33:04,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:04,637 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3651d8ead0f3,41349,1733758381415' *****
2024-12-09T15:33:04,637 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T15:33:04,638 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T15:33:04,639 INFO [RS:0;3651d8ead0f3:41349 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T15:33:04,639 INFO [RS:0;3651d8ead0f3:41349 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T15:33:04,639 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(959): stopping server 3651d8ead0f3,41349,1733758381415
2024-12-09T15:33:04,639 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:33:04,639 INFO [RS:0;3651d8ead0f3:41349 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3651d8ead0f3:41349.
2024-12-09T15:33:04,639 DEBUG [RS:0;3651d8ead0f3:41349 {}] client.AsyncConnectionImpl(264): Call stack:
at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:33:04,639 DEBUG [RS:0;3651d8ead0f3:41349 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:04,639 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T15:33:04,640 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T15:33:04,640 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T15:33:04,640 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T15:33:04,642 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T15:33:04,650 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-09T15:33:04,650 DEBUG [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-12-09T15:33:04,650 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:33:04,650 DEBUG [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:33:04,651 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T15:33:04,651 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T15:33:04,651 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T15:33:04,651 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T15:33:04,651 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T15:33:04,651 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB
2024-12-09T15:33:04,701 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T15:33:04,701 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T15:33:04,712 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740/.tmp/ns/ff6ca32d959b4eb49c6c690cf10c1b4c is 43, key is default/ns:d/1733758384416/Put/seqid=0
2024-12-09T15:33:04,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741835_1011 (size=5153)
2024-12-09T15:33:04,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741835_1011 (size=5153)
2024-12-09T15:33:04,792 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740/.tmp/ns/ff6ca32d959b4eb49c6c690cf10c1b4c
2024-12-09T15:33:04,824 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740/.tmp/ns/ff6ca32d959b4eb49c6c690cf10c1b4c as hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740/ns/ff6ca32d959b4eb49c6c690cf10c1b4c
2024-12-09T15:33:04,849 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740/ns/ff6ca32d959b4eb49c6c690cf10c1b4c, entries=2, sequenceid=6, filesize=5.0 K
2024-12-09T15:33:04,854 DEBUG [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:33:04,854 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 203ms, sequenceid=6, compaction requested=false
2024-12-09T15:33:04,862 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-09T15:33:04,887 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-09T15:33:04,888 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:33:04,888 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T15:33:04,888 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758384651Running coprocessor pre-close hooks at 1733758384651Disabling compacts and flushes for region at 1733758384651Disabling writes for close at 1733758384651Obtaining lock to block concurrent updates at 1733758384651Preparing flush snapshotting stores in 1588230740 at 1733758384651Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733758384652 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733758384659 (+7 ms)Flushing 1588230740/ns: creating writer at 1733758384659Flushing 1588230740/ns: appending metadata at 1733758384700 (+41 ms)Flushing 1588230740/ns: closing flushed file at 1733758384700Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f37d699: reopening flushed file at 1733758384820 (+120 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 203ms, sequenceid=6, compaction requested=false at 1733758384862 (+42 ms)Writing region close event to WAL at 1733758384876 (+14 ms)Running coprocessor post-close hooks at 1733758384888 (+12 ms)Closed at 1733758384888
2024-12-09T15:33:04,889 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T15:33:05,055 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(976): stopping server 3651d8ead0f3,41349,1733758381415; all regions closed.
2024-12-09T15:33:05,056 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,056 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,056 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,056 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,057 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741834_1010 (size=1152)
2024-12-09T15:33:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741834_1010 (size=1152)
2024-12-09T15:33:05,066 DEBUG [RS:0;3651d8ead0f3:41349 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/oldWALs
2024-12-09T15:33:05,066 INFO [RS:0;3651d8ead0f3:41349 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C41349%2C1733758381415.meta:.meta(num 1733758384276)
2024-12-09T15:33:05,067 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,067 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,067 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,068 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,068 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741833_1009 (size=93)
2024-12-09T15:33:05,074 DEBUG [RS:0;3651d8ead0f3:41349 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/oldWALs
2024-12-09T15:33:05,074 INFO [RS:0;3651d8ead0f3:41349 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C41349%2C1733758381415:(num 1733758383855)
2024-12-09T15:33:05,074 DEBUG [RS:0;3651d8ead0f3:41349 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:05,075 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:33:05,075 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:33:05,075 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.ChoreService(370): Chore service for: regionserver/3651d8ead0f3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T15:33:05,075 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:33:05,075 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
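The "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL: FSHLog ..." records show the region server rolling and archiving its write-ahead logs during shutdown. A log-rolling test can also request a roll explicitly; a sketch using the public Admin API, assuming a Connection and the target ServerName are already in scope:

import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class WalRollSketch {
  static void rollWal(Connection connection, ServerName serverName) throws IOException {
    try (Admin admin = connection.getAdmin()) {
      // Asks the region server to close its current WAL file and open a new one;
      // files no longer needed are later archived under oldWALs, as logged above.
      admin.rollWALWriter(serverName);
    }
  }
}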
2024-12-09T15:33:05,075 INFO [RS:0;3651d8ead0f3:41349 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41349
2024-12-09T15:33:05,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741833_1009 (size=93)
2024-12-09T15:33:05,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T15:33:05,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3651d8ead0f3,41349,1733758381415
2024-12-09T15:33:05,078 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:33:05,080 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3651d8ead0f3,41349,1733758381415]
2024-12-09T15:33:05,082 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3651d8ead0f3,41349,1733758381415 already deleted, retry=false
2024-12-09T15:33:05,082 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3651d8ead0f3,41349,1733758381415 expired; onlineServers=0
2024-12-09T15:33:05,082 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3651d8ead0f3,34463,1733758381319' *****
2024-12-09T15:33:05,083 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T15:33:05,083 INFO [M:0;3651d8ead0f3:34463 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:33:05,083 INFO [M:0;3651d8ead0f3:34463 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:33:05,083 DEBUG [M:0;3651d8ead0f3:34463 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T15:33:05,083 DEBUG [M:0;3651d8ead0f3:34463 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T15:33:05,083 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T15:33:05,083 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758383417 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758383417,5,FailOnTimeoutGroup]
2024-12-09T15:33:05,083 INFO [M:0;3651d8ead0f3:34463 {}] hbase.ChoreService(370): Chore service for: master/3651d8ead0f3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T15:33:05,083 INFO [M:0;3651d8ead0f3:34463 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:33:05,084 DEBUG [M:0;3651d8ead0f3:34463 {}] master.HMaster(1795): Stopping service threads
2024-12-09T15:33:05,084 INFO [M:0;3651d8ead0f3:34463 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T15:33:05,084 INFO [M:0;3651d8ead0f3:34463 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:33:05,084 INFO [M:0;3651d8ead0f3:34463 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T15:33:05,085 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T15:33:05,086 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758383415 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758383415,5,FailOnTimeoutGroup]
2024-12-09T15:33:05,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T15:33:05,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:33:05,087 DEBUG [M:0;3651d8ead0f3:34463 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false
2024-12-09T15:33:05,087 DEBUG [M:0;3651d8ead0f3:34463 {}] master.ActiveMasterManager(353): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master
2024-12-09T15:33:05,088 INFO [M:0;3651d8ead0f3:34463 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/.lastflushedseqids
2024-12-09T15:33:05,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741836_1012 (size=99)
2024-12-09T15:33:05,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741836_1012 (size=99)
2024-12-09T15:33:05,106 INFO [M:0;3651d8ead0f3:34463 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T15:33:05,106 INFO [M:0;3651d8ead0f3:34463 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T15:33:05,106 DEBUG [M:0;3651d8ead0f3:34463 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T15:33:05,106 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:33:05,106 DEBUG [M:0;3651d8ead0f3:34463 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:33:05,106 DEBUG [M:0;3651d8ead0f3:34463 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T15:33:05,106 DEBUG [M:0;3651d8ead0f3:34463 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:33:05,106 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-12-09T15:33:05,137 DEBUG [M:0;3651d8ead0f3:34463 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0d63fe6295e146c58713dc7b3ca955cd is 82, key is hbase:meta,,1/info:regioninfo/1733758384373/Put/seqid=0
2024-12-09T15:33:05,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741837_1013 (size=5672)
2024-12-09T15:33:05,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741837_1013 (size=5672)
2024-12-09T15:33:05,163 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0d63fe6295e146c58713dc7b3ca955cd
2024-12-09T15:33:05,182 INFO [RS:0;3651d8ead0f3:41349 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:33:05,182 INFO [RS:0;3651d8ead0f3:41349 {}] regionserver.HRegionServer(1031): Exiting; stopping=3651d8ead0f3,41349,1733758381415; zookeeper connection closed.
2024-12-09T15:33:05,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:33:05,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41349-0x1012bd0fce30001, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:33:05,184 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@78da550c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@78da550c
2024-12-09T15:33:05,190 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T15:33:05,206 DEBUG [M:0;3651d8ead0f3:34463 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3f7b29b2eeb9403b9fc79c4f313699c2 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733758384449/Put/seqid=0
2024-12-09T15:33:05,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741838_1014 (size=5275)
2024-12-09T15:33:05,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741838_1014 (size=5275)
2024-12-09T15:33:05,236 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3f7b29b2eeb9403b9fc79c4f313699c2
2024-12-09T15:33:05,275 DEBUG [M:0;3651d8ead0f3:34463 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f9cc625734b847878aef985584ada162 is 69, key is 3651d8ead0f3,41349,1733758381415/rs:state/1733758383562/Put/seqid=0
2024-12-09T15:33:05,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741839_1015 (size=5156)
2024-12-09T15:33:05,301 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f9cc625734b847878aef985584ada162
2024-12-09T15:33:05,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741839_1015 (size=5156)
2024-12-09T15:33:05,338 DEBUG [M:0;3651d8ead0f3:34463 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c96e1f3daa6843fd80205a1cac66a62a is 52, key is load_balancer_on/state:d/1733758384609/Put/seqid=0
2024-12-09T15:33:05,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741840_1016 (size=5056)
2024-12-09T15:33:05,353 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c96e1f3daa6843fd80205a1cac66a62a
2024-12-09T15:33:05,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741840_1016 (size=5056)
2024-12-09T15:33:05,370 DEBUG [M:0;3651d8ead0f3:34463 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0d63fe6295e146c58713dc7b3ca955cd as hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0d63fe6295e146c58713dc7b3ca955cd
2024-12-09T15:33:05,385 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0d63fe6295e146c58713dc7b3ca955cd, entries=8, sequenceid=29, filesize=5.5 K
2024-12-09T15:33:05,387 DEBUG [M:0;3651d8ead0f3:34463 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3f7b29b2eeb9403b9fc79c4f313699c2 as hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3f7b29b2eeb9403b9fc79c4f313699c2
2024-12-09T15:33:05,404 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3f7b29b2eeb9403b9fc79c4f313699c2, entries=3, sequenceid=29, filesize=5.2 K
2024-12-09T15:33:05,411 DEBUG [M:0;3651d8ead0f3:34463 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f9cc625734b847878aef985584ada162 as hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f9cc625734b847878aef985584ada162
2024-12-09T15:33:05,431 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f9cc625734b847878aef985584ada162, entries=1, sequenceid=29, filesize=5.0 K
2024-12-09T15:33:05,435 DEBUG [M:0;3651d8ead0f3:34463 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c96e1f3daa6843fd80205a1cac66a62a as hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c96e1f3daa6843fd80205a1cac66a62a
2024-12-09T15:33:05,448 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33363/user/jenkins/test-data/a7cf2c96-d089-a029-bf19-33ffa1ed0e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c96e1f3daa6843fd80205a1cac66a62a, entries=1, sequenceid=29, filesize=4.9 K
2024-12-09T15:33:05,450 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 344ms, sequenceid=29, compaction requested=false
2024-12-09T15:33:05,462 INFO [M:0;3651d8ead0f3:34463 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:33:05,462 DEBUG [M:0;3651d8ead0f3:34463 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758385106Disabling compacts and flushes for region at 1733758385106Disabling writes for close at 1733758385106Obtaining lock to block concurrent updates at 1733758385106Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733758385106Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733758385107 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733758385110 (+3 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733758385110Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733758385136 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733758385136Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733758385172 (+36 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733758385205 (+33 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733758385205Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733758385247 (+42 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733758385274 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733758385274Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733758385311 (+37 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733758385337 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733758385337Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e3f00ad: reopening flushed file at 1733758385368 (+31 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5153d726: reopening flushed file at 1733758385385 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6954322a: reopening flushed file at 1733758385404 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55dce49d: reopening flushed file at 1733758385431 (+27 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 344ms, sequenceid=29, compaction requested=false at 1733758385450 (+19 ms)Writing region close event to WAL at 1733758385462 (+12 ms)Closed at 1733758385462
2024-12-09T15:33:05,464 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,464 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,470 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,471 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,471 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:05,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741830_1006 (size=10311)
2024-12-09T15:33:05,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38353 is added to blk_1073741830_1006 (size=10311)
2024-12-09T15:33:05,483 INFO [M:0;3651d8ead0f3:34463 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T15:33:05,483 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
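Each flush above follows the same sequence: write the snapshot to a .tmp file, commit it into the column-family directory, log "Added ..., entries=N, sequenceid=S, filesize=...", then emit the region close journal. These flushes are triggered internally by region close, but the same path can be exercised on demand for a user table; a sketch, with the table name purely illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushSketch {
  static void flushTable(Admin admin) throws IOException {
    // Forces the memstores of every region of the table to disk, producing
    // DefaultStoreFlusher/HStore records like the ones above.
    admin.flush(TableName.valueOf("default", "example_table"));
  }
}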
2024-12-09T15:33:05,483 INFO [M:0;3651d8ead0f3:34463 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34463
2024-12-09T15:33:05,484 INFO [M:0;3651d8ead0f3:34463 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:33:05,586 INFO [M:0;3651d8ead0f3:34463 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:33:05,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:33:05,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34463-0x1012bd0fce30000, quorum=127.0.0.1:58671, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:33:05,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@af33574{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:33:05,595 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17d00685{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:33:05,595 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:33:05,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6082dc4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:33:05,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1410bc86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.log.dir/,STOPPED}
2024-12-09T15:33:05,603 WARN [BP-2001731650-172.17.0.3-1733758380021 heartbeating to localhost/127.0.0.1:33363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:33:05,603 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:33:05,603 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:33:05,603 WARN [BP-2001731650-172.17.0.3-1733758380021 heartbeating to localhost/127.0.0.1:33363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2001731650-172.17.0.3-1733758380021 (Datanode Uuid a86ae306-d613-47ef-b678-318ce0aaf2ae) service to localhost/127.0.0.1:33363
2024-12-09T15:33:05,604 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/data/data3/current/BP-2001731650-172.17.0.3-1733758380021 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:33:05,605 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/data/data4/current/BP-2001731650-172.17.0.3-1733758380021 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:33:05,605 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:33:05,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36505daf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:33:05,620 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21e00560{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:33:05,620 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:33:05,620 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61783b0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:33:05,620 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@779c0b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.log.dir/,STOPPED}
2024-12-09T15:33:05,632 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:33:05,632 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:33:05,633 WARN [BP-2001731650-172.17.0.3-1733758380021 heartbeating to localhost/127.0.0.1:33363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:33:05,633 WARN [BP-2001731650-172.17.0.3-1733758380021 heartbeating to localhost/127.0.0.1:33363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2001731650-172.17.0.3-1733758380021 (Datanode Uuid ea9211a5-d4a3-45ed-aae6-241df7285a02) service to localhost/127.0.0.1:33363
2024-12-09T15:33:05,634 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/data/data1/current/BP-2001731650-172.17.0.3-1733758380021 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:33:05,636 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/cluster_66bd8b78-d2bb-be62-81d8-21848a7c58bb/data/data2/current/BP-2001731650-172.17.0.3-1733758380021 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:33:05,636 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:33:05,659 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4e15a6d0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T15:33:05,660 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2152d149{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:33:05,660 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:33:05,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b8bf96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:33:05,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b5fac92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.log.dir/,STOPPED}
2024-12-09T15:33:05,680 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T15:33:05,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T15:33:05,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T15:33:05,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.log.dir so I do NOT create it in target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286
2024-12-09T15:33:05,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b46dbd27-8d31-7f77-ff3c-7ea15a98b6e4/hadoop.tmp.dir so I do NOT create it in target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286
2024-12-09T15:33:05,705 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9, deleteOnExit=true
2024-12-09T15:33:05,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T15:33:05,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/test.cache.data in system properties and HBase conf
2024-12-09T15:33:05,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T15:33:05,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir in system properties and HBase conf
2024-12-09T15:33:05,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T15:33:05,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T15:33:05,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T15:33:05,706 INFO [regionserver/3651d8ead0f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:33:05,706 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T15:33:05,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-09T15:33:05,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-09T15:33:05,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-09T15:33:05,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T15:33:05,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-09T15:33:05,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-09T15:33:05,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T15:33:05,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T15:33:05,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-09T15:33:05,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/nfs.dump.dir in system properties and HBase conf
2024-12-09T15:33:05,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/java.io.tmpdir in system properties and HBase conf
2024-12-09T15:33:05,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T15:33:05,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-09T15:33:05,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-09T15:33:05,732 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T15:33:05,829 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:33:05,835 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:33:05,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:33:05,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:33:05,838 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T15:33:05,839 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:33:05,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6de86657{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:33:05,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14d09ab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:33:05,976 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72f8e821{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/java.io.tmpdir/jetty-localhost-40123-hadoop-hdfs-3_4_1-tests_jar-_-any-15720916849073930271/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T15:33:05,976 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e26ba04{HTTP/1.1, (http/1.1)}{localhost:40123} 2024-12-09T15:33:05,976 INFO [Time-limited test {}] server.Server(415): Started @111628ms 2024-12-09T15:33:05,992 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T15:33:06,101 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:33:06,107 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:33:06,108 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:33:06,108 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:33:06,108 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T15:33:06,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2693ea96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:33:06,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@616f4c13{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:33:06,270 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@200d0f88{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/java.io.tmpdir/jetty-localhost-39927-hadoop-hdfs-3_4_1-tests_jar-_-any-1286520228178511682/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:33:06,270 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4743e1dd{HTTP/1.1, (http/1.1)}{localhost:39927} 2024-12-09T15:33:06,271 INFO [Time-limited test {}] server.Server(415): Started @111922ms 2024-12-09T15:33:06,273 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T15:33:06,330 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:33:06,334 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:33:06,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:33:06,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:33:06,341 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T15:33:06,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45dc61f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:33:06,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b9ab422{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:33:06,407 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data1/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation, 2024-12-09T15:33:06,407 WARN [Thread-659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data2/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation, 2024-12-09T15:33:06,439 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T15:33:06,446 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf946d1c885594fe5 with lease ID 0xf7a85b8dadcc3b36: Processing first storage report for DS-0bfae810-8754-461c-a55d-3954b9a63e73 from datanode DatanodeRegistration(127.0.0.1:39255, datanodeUuid=f6d4c26d-7046-48fd-a2a7-59cbe335c559, infoPort=36715, infoSecurePort=0, ipcPort=39353, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755) 2024-12-09T15:33:06,446 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf946d1c885594fe5 with lease ID 0xf7a85b8dadcc3b36: from storage DS-0bfae810-8754-461c-a55d-3954b9a63e73 node DatanodeRegistration(127.0.0.1:39255, datanodeUuid=f6d4c26d-7046-48fd-a2a7-59cbe335c559, infoPort=36715, infoSecurePort=0, ipcPort=39353, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:33:06,446 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf946d1c885594fe5 with lease ID 0xf7a85b8dadcc3b36: Processing first storage report for DS-e4b383ca-11c6-42dd-9918-7411522ee146 from datanode DatanodeRegistration(127.0.0.1:39255, datanodeUuid=f6d4c26d-7046-48fd-a2a7-59cbe335c559, infoPort=36715, infoSecurePort=0, ipcPort=39353, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755) 2024-12-09T15:33:06,446 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf946d1c885594fe5 with lease ID 0xf7a85b8dadcc3b36: from storage DS-e4b383ca-11c6-42dd-9918-7411522ee146 node DatanodeRegistration(127.0.0.1:39255, datanodeUuid=f6d4c26d-7046-48fd-a2a7-59cbe335c559, infoPort=36715, infoSecurePort=0, ipcPort=39353, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:33:06,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47d1c4f2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/java.io.tmpdir/jetty-localhost-43119-hadoop-hdfs-3_4_1-tests_jar-_-any-8510976252329486428/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:33:06,487 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e10767c{HTTP/1.1, (http/1.1)}{localhost:43119} 2024-12-09T15:33:06,488 INFO [Time-limited test {}] server.Server(415): Started @112139ms 2024-12-09T15:33:06,490 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
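Everything from the StartMiniClusterOption dump above down through the Jetty and MiniZooKeeperCluster lines is driven by a single HBaseTestingUtil call in the test. A minimal sketch of that calling side, assuming the builder-style API suggested by the option's toString (the builder method names are an assumption, not taken from this log):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirror the option printed in the log: 1 master, 1 region server,
    // 2 datanodes, 1 ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    // Produces the "STARTING DFS", Jetty, and MiniZK startup lines seen above.
    util.startMiniCluster(option);
    try {
      // ... run test logic against the running minicluster ...
    } finally {
      // Produces the "Minicluster is down" line seen earlier in this section.
      util.shutdownMiniCluster();
    }
  }
}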
2024-12-09T15:33:06,606 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data3/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation, 2024-12-09T15:33:06,607 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data4/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation, 2024-12-09T15:33:06,631 WARN [Thread-673 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T15:33:06,637 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb38edefd090a8945 with lease ID 0xf7a85b8dadcc3b37: Processing first storage report for DS-2f8f3119-b715-4d10-a31f-b2493621b334 from datanode DatanodeRegistration(127.0.0.1:44637, datanodeUuid=5682c3b3-0aeb-4fcb-94ae-68e4c762f6bd, infoPort=41283, infoSecurePort=0, ipcPort=33797, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755) 2024-12-09T15:33:06,638 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb38edefd090a8945 with lease ID 0xf7a85b8dadcc3b37: from storage DS-2f8f3119-b715-4d10-a31f-b2493621b334 node DatanodeRegistration(127.0.0.1:44637, datanodeUuid=5682c3b3-0aeb-4fcb-94ae-68e4c762f6bd, infoPort=41283, infoSecurePort=0, ipcPort=33797, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T15:33:06,638 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb38edefd090a8945 with lease ID 0xf7a85b8dadcc3b37: Processing first storage report for DS-4b0840f3-828e-40c8-84de-b47d95ca562f from datanode DatanodeRegistration(127.0.0.1:44637, datanodeUuid=5682c3b3-0aeb-4fcb-94ae-68e4c762f6bd, infoPort=41283, infoSecurePort=0, ipcPort=33797, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755) 2024-12-09T15:33:06,638 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb38edefd090a8945 with lease ID 0xf7a85b8dadcc3b37: from storage DS-4b0840f3-828e-40c8-84de-b47d95ca562f node DatanodeRegistration(127.0.0.1:44637, datanodeUuid=5682c3b3-0aeb-4fcb-94ae-68e4c762f6bd, infoPort=41283, infoSecurePort=0, ipcPort=33797, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:33:06,729 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286 2024-12-09T15:33:06,732 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/zookeeper_0, clientPort=60306, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T15:33:06,733 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60306 2024-12-09T15:33:06,733 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:06,735 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:06,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39255 is added to blk_1073741825_1001 (size=7) 2024-12-09T15:33:06,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44637 is added to blk_1073741825_1001 (size=7) 2024-12-09T15:33:06,758 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647 with version=8 2024-12-09T15:33:06,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/hbase-staging 2024-12-09T15:33:06,761 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3651d8ead0f3:0 server-side Connection retries=45 2024-12-09T15:33:06,761 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:06,762 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:06,762 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T15:33:06,762 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:06,762 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T15:33:06,762 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T15:33:06,762 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T15:33:06,764 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43929 2024-12-09T15:33:06,766 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43929 connecting to ZooKeeper ensemble=127.0.0.1:60306 2024-12-09T15:33:06,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:439290x0, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T15:33:06,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43929-0x1012bd1124b0000 connected 2024-12-09T15:33:06,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:06,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:06,833 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:33:06,833 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647, hbase.cluster.distributed=false 2024-12-09T15:33:06,835 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T15:33:06,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43929 2024-12-09T15:33:06,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43929 2024-12-09T15:33:06,846 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43929 2024-12-09T15:33:06,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43929 2024-12-09T15:33:06,856 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43929 2024-12-09T15:33:06,875 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3651d8ead0f3:0 server-side Connection retries=45 2024-12-09T15:33:06,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:06,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:06,875 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T15:33:06,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:06,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T15:33:06,875 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T15:33:06,875 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T15:33:06,876 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34623 2024-12-09T15:33:06,878 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34623 connecting to ZooKeeper ensemble=127.0.0.1:60306 2024-12-09T15:33:06,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:06,880 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:06,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:346230x0, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T15:33:06,891 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:346230x0, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:33:06,891 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T15:33:06,891 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34623-0x1012bd1124b0001 connected 2024-12-09T15:33:06,898 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T15:33:06,901 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T15:33:06,903 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T15:33:06,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34623 2024-12-09T15:33:06,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34623 2024-12-09T15:33:06,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34623 2024-12-09T15:33:06,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34623 2024-12-09T15:33:06,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34623 2024-12-09T15:33:06,947 
DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3651d8ead0f3:43929 2024-12-09T15:33:06,947 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3651d8ead0f3,43929,1733758386761 2024-12-09T15:33:06,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:33:06,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:33:06,963 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3651d8ead0f3,43929,1733758386761 2024-12-09T15:33:06,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T15:33:06,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:06,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:06,972 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T15:33:06,974 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3651d8ead0f3,43929,1733758386761 from backup master directory 2024-12-09T15:33:06,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3651d8ead0f3,43929,1733758386761 2024-12-09T15:33:06,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:33:06,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:33:06,979 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
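The watcher traffic above is the ZooKeeper side of master registration: an ephemeral znode under /hbase/backup-masters, then a claim on /hbase/master, then deletion of the backup entry. The sketch below is not HBase's internal code, just the bare ZooKeeper primitive behind that sequence, with the quorum address and znode paths copied from the log (and assuming the parent znodes already exist):

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.*;

public class EphemeralRegistration {
  public static void main(String[] args) throws Exception {
    // Quorum address as printed in the log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60306", 30000, event -> {
      // Equivalent of the ZKWatcher lines: NodeCreated / NodeChildrenChanged
      // events on watched paths are delivered to this callback.
      System.out.println("Received ZooKeeper Event, type=" + event.getType()
          + ", state=" + event.getState() + ", path=" + event.getPath());
    });
    String name = "3651d8ead0f3,43929,1733758386761";
    byte[] id = name.getBytes(StandardCharsets.UTF_8);
    // Ephemeral: the znode disappears when the session dies, which is how
    // other masters detect a crashed active master.
    zk.create("/hbase/backup-masters/" + name, id,
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // Claim the active-master znode, then drop out of backup-masters; this
    // mirrors the "Deleting ZNode ... from backup master directory" step.
    zk.create("/hbase/master", id,
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    zk.delete("/hbase/backup-masters/" + name, -1);
    zk.close();
  }
}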
2024-12-09T15:33:06,979 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3651d8ead0f3,43929,1733758386761 2024-12-09T15:33:06,988 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/hbase.id] with ID: cfcb9ab0-b9b1-496d-910a-248427e7f14b 2024-12-09T15:33:06,988 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/.tmp/hbase.id 2024-12-09T15:33:07,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44637 is added to blk_1073741826_1002 (size=42) 2024-12-09T15:33:07,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39255 is added to blk_1073741826_1002 (size=42) 2024-12-09T15:33:07,023 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/.tmp/hbase.id]:[hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/hbase.id] 2024-12-09T15:33:07,089 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:07,089 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T15:33:07,091 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
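The hbase.id dance above is a standard write-to-temp-then-rename: because an HDFS rename is atomic, readers either see the complete cluster ID file or no file at all. A self-contained sketch of the same pattern using the plain Hadoop FileSystem API (the helper name and shortened paths are mine, not FSUtils internals):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AtomicIdFile {
  // Write content to a temporary path, then rename it into place. The rename
  // is atomic in HDFS, so the destination either holds the full content or
  // does not exist.
  static void writeAtomically(FileSystem fs, Path tmp, Path dst, String content)
      throws IOException {
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename " + tmp + " -> " + dst + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:38871"); // NameNode port from the log
    FileSystem fs = FileSystem.get(conf);
    writeAtomically(fs,
        new Path("/user/jenkins/test-data/.tmp/hbase.id"),
        new Path("/user/jenkins/test-data/hbase.id"),
        "cfcb9ab0-b9b1-496d-910a-248427e7f14b"); // cluster ID printed in the log
  }
}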
2024-12-09T15:33:07,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:07,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:07,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44637 is added to blk_1073741827_1003 (size=196) 2024-12-09T15:33:07,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39255 is added to blk_1073741827_1003 (size=196) 2024-12-09T15:33:07,130 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T15:33:07,131 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T15:33:07,132 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:33:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39255 is added to blk_1073741828_1004 (size=1189) 2024-12-09T15:33:07,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44637 is added to blk_1073741828_1004 (size=1189) 2024-12-09T15:33:07,164 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store 2024-12-09T15:33:07,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39255 is added to blk_1073741829_1005 (size=34) 2024-12-09T15:33:07,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44637 is added to blk_1073741829_1005 (size=34) 2024-12-09T15:33:07,195 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:33:07,195 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T15:33:07,195 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:07,195 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:07,195 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T15:33:07,195 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:07,195 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
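The long attribute dump above is just the toString of the master:store table descriptor. For reference, the logged 'info' family settings correspond roughly to the public builder API below (a sketch for comparison only; MasterRegion constructs its descriptor internally):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorExample {
  public static void main(String[] args) {
    // Matches the logged 'info' family: VERSIONS=3, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, in-memory, 8 KB block size.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
    System.out.println(store); // prints an attribute dump like the one in the log
  }
}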
2024-12-09T15:33:07,196 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758387195Disabling compacts and flushes for region at 1733758387195Disabling writes for close at 1733758387195Writing region close event to WAL at 1733758387195Closed at 1733758387195 2024-12-09T15:33:07,203 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/.initializing 2024-12-09T15:33:07,203 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761 2024-12-09T15:33:07,210 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C43929%2C1733758386761, suffix=, logDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761, archiveDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/oldWALs, maxLogs=10 2024-12-09T15:33:07,210 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C43929%2C1733758386761.1733758387210 2024-12-09T15:33:07,230 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 2024-12-09T15:33:07,239 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41283:41283),(127.0.0.1/127.0.0.1:36715:36715)] 2024-12-09T15:33:07,242 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:33:07,242 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:33:07,242 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,243 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,255 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T15:33:07,268 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:07,270 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:07,270 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T15:33:07,275 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:07,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:33:07,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T15:33:07,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:07,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:33:07,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T15:33:07,289 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:07,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:33:07,291 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,292 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,292 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,295 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,295 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,295 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T15:33:07,303 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:33:07,319 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:33:07,320 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857340, jitterRate=0.09016500413417816}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T15:33:07,322 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733758387243Initializing all the Stores at 1733758387250 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758387250Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758387250Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758387250Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758387250Cleaning up temporary data from old regions at 1733758387295 (+45 ms)Region opened successfully at 1733758387322 (+27 ms) 2024-12-09T15:33:07,325 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T15:33:07,343 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60601cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:33:07,362 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T15:33:07,362 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T15:33:07,362 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T15:33:07,363 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T15:33:07,367 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 3 msec 2024-12-09T15:33:07,367 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T15:33:07,367 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T15:33:07,388 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T15:33:07,403 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T15:33:07,411 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T15:33:07,411 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T15:33:07,413 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T15:33:07,414 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T15:33:07,415 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T15:33:07,424 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T15:33:07,425 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T15:33:07,427 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T15:33:07,432 DEBUG 
[master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T15:33:07,439 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T15:33:07,446 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T15:33:07,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:33:07,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:33:07,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:07,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:07,452 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3651d8ead0f3,43929,1733758386761, sessionid=0x1012bd1124b0000, setting cluster-up flag (Was=false) 2024-12-09T15:33:07,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:07,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:07,480 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T15:33:07,483 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,43929,1733758386761 2024-12-09T15:33:07,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:07,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:07,501 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T15:33:07,503 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,43929,1733758386761 2024-12-09T15:33:07,506 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/.lastflushedseqids; will record last flushed sequence ids for regions from regionserver reports all over again 2024-12-09T15:33:07,509 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T15:33:07,509 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T15:33:07,509 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction], sum of cost function multipliers = 0.0, etc. 2024-12-09T15:33:07,510 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3651d8ead0f3,43929,1733758386761; Number of backup masters: 0; Number of live region servers: 0; Number of dead region servers: 0; Number of unknown region servers: 0; Average load: 0.0; Number of requests: 0; Number of regions: 0; Number of regions in transition: 0 2024-12-09T15:33:07,513 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:33:07,513 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:33:07,513 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:33:07,513 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:33:07,513 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3651d8ead0f3:0, corePoolSize=10, maxPoolSize=10 2024-12-09T15:33:07,513 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,513 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:33:07,513 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1,
maxPoolSize=1 2024-12-09T15:33:07,517 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:33:07,517 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T15:33:07,519 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:07,519 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T15:33:07,527 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733758417527 2024-12-09T15:33:07,527 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T15:33:07,527 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T15:33:07,527 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T15:33:07,527 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T15:33:07,528 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T15:33:07,528 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T15:33:07,528 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,528 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T15:33:07,529 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T15:33:07,529 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T15:33:07,534 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T15:33:07,534 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T15:33:07,538 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758387534,5,FailOnTimeoutGroup] 2024-12-09T15:33:07,540 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758387539,5,FailOnTimeoutGroup] 2024-12-09T15:33:07,540 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,540 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T15:33:07,540 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,540 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T15:33:07,540 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(746): ClusterId : cfcb9ab0-b9b1-496d-910a-248427e7f14b 2024-12-09T15:33:07,540 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T15:33:07,543 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T15:33:07,543 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T15:33:07,545 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T15:33:07,546 DEBUG [RS:0;3651d8ead0f3:34623 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62c084a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:33:07,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39255 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:33:07,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44637 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:33:07,548 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T15:33:07,548 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647 2024-12-09T15:33:07,567 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.ShutdownHook(81): 
Installed shutdown hook thread: Shutdownhook:RS:0;3651d8ead0f3:34623 2024-12-09T15:33:07,567 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T15:33:07,567 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T15:33:07,567 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T15:33:07,569 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(2659): reportForDuty to master=3651d8ead0f3,43929,1733758386761 with port=34623, startcode=1733758386874 2024-12-09T15:33:07,569 DEBUG [RS:0;3651d8ead0f3:34623 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T15:33:07,588 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39617, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T15:33:07,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44637 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:33:07,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39255 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:33:07,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0; minColumnNum=100; preparePutThreadLimit=0; hotProtect now disabled 2024-12-09T15:33:07,594 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43929 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:07,594 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43929 {}] master.ServerManager(517): Registering regionserver=3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:07,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:33:07,600 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647 2024-12-09T15:33:07,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:33:07,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:33:07,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:07,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:33:07,602 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38871 2024-12-09T15:33:07,602 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T15:33:07,604 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:33:07,604 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:07,604 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:07,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:33:07,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:33:07,607 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:07,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:07,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T15:33:07,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:33:07,610 DEBUG [RS:0;3651d8ead0f3:34623 {}] zookeeper.ZKUtil(111): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:07,610 WARN [RS:0;3651d8ead0f3:34623 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T15:33:07,610 INFO [RS:0;3651d8ead0f3:34623 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:33:07,610 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:07,610 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:33:07,610 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:07,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:07,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:33:07,611 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3651d8ead0f3,34623,1733758386874] 2024-12-09T15:33:07,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740 2024-12-09T15:33:07,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740 2024-12-09T15:33:07,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 
1588230740 2024-12-09T15:33:07,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:33:07,616 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor; using region.getMemStoreFlushHeapSize/# of families (16.0 M) instead. 2024-12-09T15:33:07,619 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T15:33:07,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:33:07,626 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T15:33:07,629 INFO [RS:0;3651d8ead0f3:34623 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T15:33:07,629 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,629 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:33:07,629 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T15:33:07,630 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicy super{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846849, jitterRate=0.07682502269744873}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:33:07,631 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T15:33:07,631 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-09T15:33:07,631 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,631 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,631 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,631 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733758387592; Initializing all the Stores at 1733758387595 (+3 ms); Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758387595; Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758387597 (+2 ms); Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758387597; Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758387597; Cleaning up temporary data from old regions at 1733758387615 (+18 ms); Region opened successfully at 1733758387632 (+17 ms) 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,632 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T15:33:07,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T15:33:07,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:33:07,632 DEBUG [RS:0;3651d8ead0f3:34623 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:33:07,635 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T15:33:07,635 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758387632; Disabling compactions and flushes for region at 1733758387632; Disabling writes for close at 1733758387632; Writing region close event to WAL at 1733758387635 (+3 ms); Closed at 1733758387635 2024-12-09T15:33:07,637 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:33:07,637 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T15:33:07,637 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T15:33:07,638 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,639 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,639 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,639 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-09T15:33:07,639 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,639 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,34623,1733758386874-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T15:33:07,643 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T15:33:07,645 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T15:33:07,662 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T15:33:07,662 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,34623,1733758386874-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,663 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:07,663 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.Replication(171): 3651d8ead0f3,34623,1733758386874 started 2024-12-09T15:33:07,689 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T15:33:07,689 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1482): Serving as 3651d8ead0f3,34623,1733758386874, RpcServer on 3651d8ead0f3/172.17.0.3:34623, sessionid=0x1012bd1124b0001 2024-12-09T15:33:07,690 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T15:33:07,690 DEBUG [RS:0;3651d8ead0f3:34623 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:07,690 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,34623,1733758386874' 2024-12-09T15:33:07,690 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T15:33:07,695 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T15:33:07,696 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T15:33:07,696 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T15:33:07,696 DEBUG [RS:0;3651d8ead0f3:34623 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:07,696 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,34623,1733758386874' 2024-12-09T15:33:07,696 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T15:33:07,697 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T15:33:07,698 DEBUG [RS:0;3651d8ead0f3:34623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T15:33:07,698 INFO [RS:0;3651d8ead0f3:34623 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T15:33:07,698 INFO [RS:0;3651d8ead0f3:34623 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T15:33:07,796 WARN [3651d8ead0f3:43929 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-09T15:33:07,816 INFO [RS:0;3651d8ead0f3:34623 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C34623%2C1733758386874, suffix=, logDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874, archiveDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs, maxLogs=32 2024-12-09T15:33:07,817 INFO [RS:0;3651d8ead0f3:34623 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C34623%2C1733758386874.1733758387817 2024-12-09T15:33:07,860 INFO [RS:0;3651d8ead0f3:34623 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 2024-12-09T15:33:07,876 DEBUG [RS:0;3651d8ead0f3:34623 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41283:41283),(127.0.0.1/127.0.0.1:36715:36715)] 2024-12-09T15:33:08,047 DEBUG [3651d8ead0f3:43929 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T15:33:08,048 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:08,050 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,34623,1733758386874, state=OPENING 2024-12-09T15:33:08,057 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T15:33:08,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:08,064 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T15:33:08,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,34623,1733758386874}] 2024-12-09T15:33:08,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:08,066 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:33:08,066 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:33:08,221 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T15:33:08,224 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41065, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T15:33:08,229 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T15:33:08,229 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:33:08,232 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C34623%2C1733758386874.meta, suffix=.meta, logDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874, archiveDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs, maxLogs=32 2024-12-09T15:33:08,268 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta 2024-12-09T15:33:08,283 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta 2024-12-09T15:33:08,292 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36715:36715),(127.0.0.1/127.0.0.1:41283:41283)] 2024-12-09T15:33:08,304 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:33:08,305 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T15:33:08,305 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T15:33:08,305 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T15:33:08,305 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T15:33:08,305 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0; minColumnNum=100; preparePutThreadLimit=0; hotProtect now disabled 2024-12-09T15:33:08,306 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T15:33:08,306 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T15:33:08,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:33:08,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:33:08,310 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:08,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:08,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:33:08,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:33:08,311 DEBUG [StoreOpener-1588230740-1 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:08,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:08,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:33:08,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:33:08,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:08,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:33:08,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:33:08,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:33:08,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:08,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T15:33:08,315 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:33:08,315 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740 2024-12-09T15:33:08,317 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740 2024-12-09T15:33:08,318 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T15:33:08,318 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:33:08,318 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor; using region.getMemStoreFlushHeapSize/# of families (16.0 M) instead. 2024-12-09T15:33:08,320 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:33:08,321 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicy super{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820845, jitterRate=0.04375940561294556}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:33:08,321 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T15:33:08,322 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733758388306; Writing region info on filesystem at 1733758388306; Initializing all the Stores at 1733758388307 (+1 ms); Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758388307; Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758388308 (+1 ms); Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758388308; Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758388308; Cleaning up temporary data from old regions at 1733758388318 (+10 ms); Running coprocessor post-open hooks at 1733758388321 (+3 ms); Region opened successfully at 1733758388321 2024-12-09T15:33:08,323 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733758388221 2024-12-09T15:33:08,326 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T15:33:08,326 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T15:33:08,327 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:08,329 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,34623,1733758386874, state=OPEN 2024-12-09T15:33:08,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:33:08,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:33:08,339 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:08,339 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:33:08,339 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:33:08,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T15:33:08,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,34623,1733758386874 in 275 msec 2024-12-09T15:33:08,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T15:33:08,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 707 msec 2024-12-09T15:33:08,348 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97):
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:33:08,348 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T15:33:08,350 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T15:33:08,350 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,34623,1733758386874, seqNum=-1] 2024-12-09T15:33:08,350 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T15:33:08,352 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53717, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T15:33:08,360 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 850 msec 2024-12-09T15:33:08,360 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733758388360, completionTime=-1 2024-12-09T15:33:08,361 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T15:33:08,361 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T15:33:08,363 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T15:33:08,363 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733758448363 2024-12-09T15:33:08,363 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733758508363 2024-12-09T15:33:08,363 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T15:33:08,363 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43929,1733758386761-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,363 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43929,1733758386761-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,364 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43929,1733758386761-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,364 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3651d8ead0f3:43929, period=300000, unit=MILLISECONDS is enabled. 
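[annotation] InitMetaProcedure above creates the two bootstrap namespaces, 'default' and 'hbase'. A hypothetical client-side check (not part of this test) that they are visible once the master finishes initialization; it assumes an hbase-site.xml pointing at the cluster is on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: list the namespaces the master just created.
    public class ListNamespacesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName()); // expect: default, hbase
          }
        }
      }
    }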
2024-12-09T15:33:08,364 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,364 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,365 DEBUG [master/3651d8ead0f3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T15:33:08,368 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.389sec 2024-12-09T15:33:08,368 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T15:33:08,368 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T15:33:08,368 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T15:33:08,368 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T15:33:08,368 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T15:33:08,368 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43929,1733758386761-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T15:33:08,368 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43929,1733758386761-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T15:33:08,371 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T15:33:08,371 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T15:33:08,371 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43929,1733758386761-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
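[annotation] The ChoreService lines above each register a periodic background task. For readers unfamiliar with the pattern, this is an illustrative sketch of how such a chore is wired up; the chore name and period here are invented for the example:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Sketch: a ScheduledChore runs chore() every `period` ms until stopped.
    public class ChoreSketch {
      public static void main(String[] args) throws Exception {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick"); // periodic work goes here
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);
        Thread.sleep(3000);
        service.shutdown();
      }
    }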
2024-12-09T15:33:08,468 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a738e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T15:33:08,468 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3651d8ead0f3,43929,-1 for getting cluster id 2024-12-09T15:33:08,468 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T15:33:08,471 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cfcb9ab0-b9b1-496d-910a-248427e7f14b' 2024-12-09T15:33:08,471 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T15:33:08,471 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cfcb9ab0-b9b1-496d-910a-248427e7f14b" 2024-12-09T15:33:08,472 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4eea958a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T15:33:08,472 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3651d8ead0f3,43929,-1] 2024-12-09T15:33:08,472 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T15:33:08,473 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T15:33:08,475 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60786, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T15:33:08,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69b06183, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T15:33:08,477 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T15:33:08,487 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,34623,1733758386874, seqNum=-1] 2024-12-09T15:33:08,487 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T15:33:08,490 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46150, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T15:33:08,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3651d8ead0f3,43929,1733758386761 2024-12-09T15:33:08,493 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:08,497 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T15:33:08,519 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3651d8ead0f3:0 server-side Connection retries=45 2024-12-09T15:33:08,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:08,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:08,519 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T15:33:08,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:33:08,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T15:33:08,520 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T15:33:08,520 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T15:33:08,521 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45133 2024-12-09T15:33:08,523 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45133 connecting to ZooKeeper ensemble=127.0.0.1:60306 2024-12-09T15:33:08,524 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:08,527 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:33:08,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:451330x0, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T15:33:08,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:451330x0, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-09T15:33:08,535 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-09T15:33:08,535 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45133-0x1012bd1124b0002 connected 2024-12-09T15:33:08,536 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T15:33:08,544 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T15:33:08,545 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:45133-0x1012bd1124b0002, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T15:33:08,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45133-0x1012bd1124b0002, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T15:33:08,554 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45133 2024-12-09T15:33:08,570 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45133 2024-12-09T15:33:08,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45133 2024-12-09T15:33:08,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45133 2024-12-09T15:33:08,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45133 2024-12-09T15:33:08,575 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(746): ClusterId : cfcb9ab0-b9b1-496d-910a-248427e7f14b 2024-12-09T15:33:08,575 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T15:33:08,584 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T15:33:08,584 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T15:33:08,589 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T15:33:08,595 DEBUG [RS:1;3651d8ead0f3:45133 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6816cf5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:33:08,625 DEBUG [RS:1;3651d8ead0f3:45133 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3651d8ead0f3:45133 2024-12-09T15:33:08,625 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T15:33:08,625 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T15:33:08,625 DEBUG [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(832): About to register with Master. 
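[annotation] The "Allocating BlockCache size=880 MB" line a little earlier follows from the standard sizing rule: the on-heap cache is a fraction of max heap, controlled by "hfile.block.cache.size" (default 0.4). The 2.2 GB heap below is an assumption chosen only so the arithmetic reproduces the figure in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Back-of-the-envelope sketch of the block cache sizing rule.
    public class BlockCacheSizeSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        float fraction = conf.getFloat("hfile.block.cache.size", 0.4f);
        long maxHeapBytes = 2200L * 1024 * 1024; // assumed -Xmx of the test JVM
        long cacheBytes = (long) (maxHeapBytes * fraction);
        System.out.printf("block cache ~= %d MB%n", cacheBytes / (1024 * 1024)); // ~880 MB
      }
    }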
2024-12-09T15:33:08,626 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(2659): reportForDuty to master=3651d8ead0f3,43929,1733758386761 with port=45133, startcode=1733758388518 2024-12-09T15:33:08,627 DEBUG [RS:1;3651d8ead0f3:45133 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T15:33:08,630 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56887, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T15:33:08,631 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43929 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3651d8ead0f3,45133,1733758388518 2024-12-09T15:33:08,631 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43929 {}] master.ServerManager(517): Registering regionserver=3651d8ead0f3,45133,1733758388518 2024-12-09T15:33:08,634 DEBUG [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647 2024-12-09T15:33:08,634 DEBUG [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38871 2024-12-09T15:33:08,634 DEBUG [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T15:33:08,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T15:33:08,638 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3651d8ead0f3,45133,1733758388518] 2024-12-09T15:33:08,639 DEBUG [RS:1;3651d8ead0f3:45133 {}] zookeeper.ZKUtil(111): regionserver:45133-0x1012bd1124b0002, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3651d8ead0f3,45133,1733758388518 2024-12-09T15:33:08,639 WARN [RS:1;3651d8ead0f3:45133 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T15:33:08,640 INFO [RS:1;3651d8ead0f3:45133 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:33:08,640 DEBUG [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518 2024-12-09T15:33:08,652 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T15:33:08,661 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T15:33:08,662 INFO [RS:1;3651d8ead0f3:45133 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T15:33:08,663 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
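[annotation] The WALFactory line above shows this region server using FSHLogProvider. As a sketch of the standard switch that selects it (key and values are stock HBase settings, not test-specific):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: "hbase.wal.provider" picks the WAL implementation;
    // "filesystem" maps to the classic FSHLog-based provider seen in the log,
    // "asyncfs" to the async variant.
    public class WalProviderSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem"); // -> FSHLogProvider
        System.out.println("wal provider = " + conf.get("hbase.wal.provider"));
      }
    }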
2024-12-09T15:33:08,664 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T15:33:08,666 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T15:33:08,666 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,666 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,666 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,666 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,666 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,666 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,667 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:33:08,667 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,667 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,667 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,667 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,667 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,667 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:33:08,667 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:33:08,667 DEBUG [RS:1;3651d8ead0f3:45133 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:33:08,670 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
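[annotation] Each ExecutorService line above reports a corePoolSize/maxPoolSize pair. This is not HBase code, just a plain java.util.concurrent analogue to make those numbers concrete: with an unbounded queue and core=max=1, events are processed strictly one at a time, which is why pools such as RS_OPEN_META handle one region open at a time:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Sketch: a single-threaded event pool with an unbounded backlog.
    public class ExecutorSketch {
      public static void main(String[] args) throws Exception {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        for (int i = 0; i < 3; i++) {
          final int id = i;
          pool.execute(() -> System.out.println("event " + id + " handled"));
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }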
2024-12-09T15:33:08,670 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,671 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,671 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,671 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,671 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,45133,1733758388518-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T15:33:08,690 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T15:33:08,690 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,45133,1733758388518-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,690 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,690 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.Replication(171): 3651d8ead0f3,45133,1733758388518 started 2024-12-09T15:33:08,708 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:33:08,708 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(1482): Serving as 3651d8ead0f3,45133,1733758388518, RpcServer on 3651d8ead0f3/172.17.0.3:45133, sessionid=0x1012bd1124b0002 2024-12-09T15:33:08,709 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T15:33:08,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;3651d8ead0f3:45133,5,FailOnTimeoutGroup] 2024-12-09T15:33:08,709 DEBUG [RS:1;3651d8ead0f3:45133 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3651d8ead0f3,45133,1733758388518 2024-12-09T15:33:08,709 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,45133,1733758388518' 2024-12-09T15:33:08,709 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T15:33:08,709 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-09T15:33:08,710 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T15:33:08,710 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T15:33:08,710 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T15:33:08,710 DEBUG [RS:1;3651d8ead0f3:45133 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3651d8ead0f3,45133,1733758388518 2024-12-09T15:33:08,710 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.ZKProcedureMemberRpcs(357): Starting 
procedure member '3651d8ead0f3,45133,1733758388518' 2024-12-09T15:33:08,710 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T15:33:08,711 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T15:33:08,711 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T15:33:08,712 DEBUG [RS:1;3651d8ead0f3:45133 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T15:33:08,712 INFO [RS:1;3651d8ead0f3:45133 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T15:33:08,712 INFO [RS:1;3651d8ead0f3:45133 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T15:33:08,712 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 3651d8ead0f3,43929,1733758386761 2024-12-09T15:33:08,712 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@74c93387 2024-12-09T15:33:08,713 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T15:33:08,716 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60798, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T15:33:08,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43929 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T15:33:08,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43929 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
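[annotation] The two TableDescriptorChecker warnings above fire because the test deliberately requests a tiny MAX_FILESIZE (786432 = 768 KB) and MEMSTORE_FLUSHSIZE (8192) to force frequent flushes and log rolls. A sketch of the kind of descriptor that trips them, with the values taken directly from the warnings:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: a descriptor small enough to trigger both checker warnings.
    public class SmallTableDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            .setMaxFileSize(786432L)     // trips the MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L) // trips the MEMSTORE_FLUSHSIZE warning
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build())
            .build();
        System.out.println(desc);
      }
    }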
2024-12-09T15:33:08,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43929 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T15:33:08,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43929 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T15:33:08,721 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T15:33:08,721 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:08,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43929 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-09T15:33:08,723 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T15:33:08,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43929 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T15:33:08,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39255 is added to blk_1073741835_1011 (size=393) 2024-12-09T15:33:08,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44637 is added to blk_1073741835_1011 (size=393) 2024-12-09T15:33:08,756 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ea79454c5caa6299cd457a756da08be7, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647 2024-12-09T15:33:08,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44637 is added to blk_1073741836_1012 (size=76) 2024-12-09T15:33:08,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39255 is added to blk_1073741836_1012 (size=76) 2024-12-09T15:33:08,771 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:33:08,771 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing ea79454c5caa6299cd457a756da08be7, disabling compactions & flushes 2024-12-09T15:33:08,771 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. 2024-12-09T15:33:08,771 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. 2024-12-09T15:33:08,771 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. after waiting 0 ms 2024-12-09T15:33:08,771 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. 2024-12-09T15:33:08,771 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. 2024-12-09T15:33:08,771 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for ea79454c5caa6299cd457a756da08be7: Waiting for close lock at 1733758388771Disabling compacts and flushes for region at 1733758388771Disabling writes for close at 1733758388771Writing region close event to WAL at 1733758388771Closed at 1733758388771 2024-12-09T15:33:08,779 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T15:33:08,780 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733758388779"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733758388779"}]},"ts":"1733758388779"} 2024-12-09T15:33:08,784 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
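[annotation] The CreateTableProcedure steps above (pre-operation, write FS layout, add to meta) are driven from a single synchronous client call. A hedged sketch of roughly what the client side looks like; the actual test goes through HBaseTestingUtil helpers rather than this direct API, and Admin.createTable blocks until the procedure reports SUCCESS:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch: the client call that kicks off a CreateTableProcedure.
    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .build());
        }
      }
    }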
2024-12-09T15:33:08,785 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T15:33:08,786 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758388785"}]},"ts":"1733758388785"} 2024-12-09T15:33:08,789 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-09T15:33:08,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ea79454c5caa6299cd457a756da08be7, ASSIGN}] 2024-12-09T15:33:08,791 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ea79454c5caa6299cd457a756da08be7, ASSIGN 2024-12-09T15:33:08,793 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ea79454c5caa6299cd457a756da08be7, ASSIGN; state=OFFLINE, location=3651d8ead0f3,34623,1733758386874; forceNewPlan=false, retain=false 2024-12-09T15:33:08,815 INFO [RS:1;3651d8ead0f3:45133 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C45133%2C1733758388518, suffix=, logDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518, archiveDir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs, maxLogs=32 2024-12-09T15:33:08,815 INFO [RS:1;3651d8ead0f3:45133 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C45133%2C1733758388518.1733758388815 2024-12-09T15:33:08,831 INFO [RS:1;3651d8ead0f3:45133 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 2024-12-09T15:33:08,844 DEBUG [RS:1;3651d8ead0f3:45133 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41283:41283),(127.0.0.1/127.0.0.1:36715:36715)] 2024-12-09T15:33:08,944 INFO [3651d8ead0f3:43929 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
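[annotation] The AbstractFSWAL line just above reports "blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32". The roll size is the WAL block size times the standard multiplier key (default 0.5). A sketch of that arithmetic using stock HBase key names; the defaults below are chosen to match this log line:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: rollsize = blocksize * logroll.multiplier.
    public class WalRollSizeSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize",
            256L * 1024 * 1024);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        System.out.printf("rollsize = %d MB, maxLogs = %d%n",
            (long) (blockSize * multiplier) / (1024 * 1024), maxLogs);
      }
    }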
2024-12-09T15:33:08,945 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea79454c5caa6299cd457a756da08be7, regionState=OPENING, regionLocation=3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:08,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ea79454c5caa6299cd457a756da08be7, ASSIGN because future has completed 2024-12-09T15:33:08,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea79454c5caa6299cd457a756da08be7, server=3651d8ead0f3,34623,1733758386874}] 2024-12-09T15:33:09,111 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. 2024-12-09T15:33:09,112 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ea79454c5caa6299cd457a756da08be7, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:33:09,112 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,112 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:33:09,112 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,112 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,114 INFO [StoreOpener-ea79454c5caa6299cd457a756da08be7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,116 INFO [StoreOpener-ea79454c5caa6299cd457a756da08be7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ea79454c5caa6299cd457a756da08be7 columnFamilyName info 2024-12-09T15:33:09,116 DEBUG [StoreOpener-ea79454c5caa6299cd457a756da08be7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:33:09,117 INFO [StoreOpener-ea79454c5caa6299cd457a756da08be7-1 {}] regionserver.HStore(327): Store=ea79454c5caa6299cd457a756da08be7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:33:09,117 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,118 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,118 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,119 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,119 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,121 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,128 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:33:09,129 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ea79454c5caa6299cd457a756da08be7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713600, jitterRate=-0.09261150658130646}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T15:33:09,129 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ea79454c5caa6299cd457a756da08be7 2024-12-09T15:33:09,130 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ea79454c5caa6299cd457a756da08be7: Running coprocessor pre-open hook at 1733758389113Writing region info on filesystem at 1733758389113Initializing all the Stores at 1733758389114 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758389114Cleaning up temporary data from old regions at 1733758389119 (+5 ms)Running coprocessor post-open hooks at 1733758389129 (+10 ms)Region opened successfully at 1733758389130 (+1 ms) 2024-12-09T15:33:09,132 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7., pid=6, masterSystemTime=1733758389105 2024-12-09T15:33:09,135 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. 2024-12-09T15:33:09,136 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. 2024-12-09T15:33:09,139 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea79454c5caa6299cd457a756da08be7, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:09,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea79454c5caa6299cd457a756da08be7, server=3651d8ead0f3,34623,1733758386874 because future has completed 2024-12-09T15:33:09,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T15:33:09,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ea79454c5caa6299cd457a756da08be7, server=3651d8ead0f3,34623,1733758386874 in 195 msec 2024-12-09T15:33:09,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T15:33:09,155 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ea79454c5caa6299cd457a756da08be7, ASSIGN in 362 msec 2024-12-09T15:33:09,157 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T15:33:09,157 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758389157"}]},"ts":"1733758389157"} 2024-12-09T15:33:09,162 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-09T15:33:09,164 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION
2024-12-09T15:33:09,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 448 msec
2024-12-09T15:33:09,819 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T15:33:09,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:09,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:09,888 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:09,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:11,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T15:33:11,689 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-09T15:33:11,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-12-09T15:33:11,690 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer
2024-12-09T15:33:11,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:33:11,690 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-09T15:33:11,691 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-09T15:33:11,691 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-09T15:33:13,619 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath'
2024-12-09T15:33:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43929 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T15:33:18,814 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed
2024-12-09T15:33:18,814 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100
2024-12-09T15:33:18,817 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath
2024-12-09T15:33:18,817 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.
2024-12-09T15:33:18,831 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:33:18,835 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:33:18,836 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:33:18,836 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:33:18,836 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T15:33:18,837 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2af8b745{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:33:18,837 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6030d470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:33:18,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41559526{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/java.io.tmpdir/jetty-localhost-41223-hadoop-hdfs-3_4_1-tests_jar-_-any-16466324403469048466/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:33:18,955 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@157a3fca{HTTP/1.1, (http/1.1)}{localhost:41223}
2024-12-09T15:33:18,955 INFO [Time-limited test {}] server.Server(415): Started @124606ms
2024-12-09T15:33:18,956 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T15:33:19,000 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:33:19,003 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:33:19,004 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:33:19,004 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:33:19,004 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T15:33:19,006 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@156f3a55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:33:19,006 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ca82099{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:33:19,068 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data6/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation,
2024-12-09T15:33:19,068 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data5/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation,
2024-12-09T15:33:19,092 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T15:33:19,094 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x169cf7b0d0461297 with lease ID 0xf7a85b8dadcc3b38: Processing first storage report for DS-0e61e7bf-bace-4da8-8ebd-957535115adc from datanode DatanodeRegistration(127.0.0.1:42305, datanodeUuid=0acbca04-c925-4d50-922c-f719c8c4e637, infoPort=35101, infoSecurePort=0, ipcPort=36473, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755)
2024-12-09T15:33:19,094 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x169cf7b0d0461297 with lease ID 0xf7a85b8dadcc3b38: from storage DS-0e61e7bf-bace-4da8-8ebd-957535115adc node DatanodeRegistration(127.0.0.1:42305, datanodeUuid=0acbca04-c925-4d50-922c-f719c8c4e637, infoPort=35101, infoSecurePort=0, ipcPort=36473, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:33:19,095 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x169cf7b0d0461297 with lease ID 0xf7a85b8dadcc3b38: Processing first storage report for DS-77e4592d-02af-4fa2-9dd0-b89c5ead6430 from datanode DatanodeRegistration(127.0.0.1:42305, datanodeUuid=0acbca04-c925-4d50-922c-f719c8c4e637, infoPort=35101, infoSecurePort=0, ipcPort=36473, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755)
2024-12-09T15:33:19,095 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x169cf7b0d0461297 with lease ID 0xf7a85b8dadcc3b38: from storage DS-77e4592d-02af-4fa2-9dd0-b89c5ead6430 node DatanodeRegistration(127.0.0.1:42305, datanodeUuid=0acbca04-c925-4d50-922c-f719c8c4e637, infoPort=35101, infoSecurePort=0, ipcPort=36473, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:33:19,135 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b4117c9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/java.io.tmpdir/jetty-localhost-46459-hadoop-hdfs-3_4_1-tests_jar-_-any-18409099346289095574/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:33:19,135 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ccc1bc4{HTTP/1.1, (http/1.1)}{localhost:46459}
2024-12-09T15:33:19,135 INFO [Time-limited test {}] server.Server(415): Started @124786ms
2024-12-09T15:33:19,137 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
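The recurring AuthenticationFilter warning above is benign in this test context: when the configured signature secret file is absent, the Hadoop HTTP server falls back to random signing secrets, which is fine for a throwaway mini cluster. A minimal sketch of how a secret file would be wired up, assuming the standard Hadoop site key (the path shown is hypothetical):

import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretSketch {
  /** Returns a Configuration pointing the Hadoop HTTP auth filter at a secret file. */
  public static Configuration withSignatureSecret(String secretFilePath) {
    Configuration conf = new Configuration();
    // Standard Hadoop site key read by the FileSignerSecretProvider; the file must
    // exist and be readable, otherwise the filter falls back to random secrets.
    conf.set("hadoop.http.authentication.signature.secret.file", secretFilePath);
    return conf;
  }
}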
2024-12-09T15:33:19,176 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:33:19,179 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:33:19,183 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:33:19,183 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:33:19,183 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:33:19,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@687696f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:33:19,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fe58b15{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:33:19,271 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation,
2024-12-09T15:33:19,271 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation,
2024-12-09T15:33:19,296 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T15:33:19,299 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbdcb43213d885ae6 with lease ID 0xf7a85b8dadcc3b39: Processing first storage report for DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d from datanode DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755)
2024-12-09T15:33:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbdcb43213d885ae6 with lease ID 0xf7a85b8dadcc3b39: from storage DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d node DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T15:33:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbdcb43213d885ae6 with lease ID 0xf7a85b8dadcc3b39: Processing first storage report for DS-6ff9e26a-c829-4140-b1f8-e57a4c3bc5f6 from datanode DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755)
2024-12-09T15:33:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbdcb43213d885ae6 with lease ID 0xf7a85b8dadcc3b39: from storage DS-6ff9e26a-c829-4140-b1f8-e57a4c3bc5f6 node DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:33:19,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69bcca{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/java.io.tmpdir/jetty-localhost-42795-hadoop-hdfs-3_4_1-tests_jar-_-any-879172959252805592/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:33:19,312 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3247fd57{HTTP/1.1, (http/1.1)}{localhost:42795}
2024-12-09T15:33:19,312 INFO [Time-limited test {}] server.Server(415): Started @124963ms
2024-12-09T15:33:19,313 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
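The BlockPoolSlice/DirectoryScanner warnings and the paired BLOCK* processReport lines above are the signature of fresh datanodes joining the mini cluster: each new volume sends a first storage report with zero blocks. A sketch of how such datanodes come up, assuming a MiniDFSCluster-based harness like the one behind this test (counts and structure are illustrative, not the test's literal code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AddDataNodesSketch {
  public static MiniDFSCluster startWithExtraDataNodes() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // initial pipeline members
        .build();
    cluster.waitActive();
    // Adding datanodes later is what produces the "first storage report"
    // BLOCK* processReport lines above: each new volume reports zero blocks.
    cluster.startDataNodes(conf, 3, true, null, null);
    cluster.waitActive();
    return cluster;
  }
}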
2024-12-09T15:33:19,452 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data9/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation,
2024-12-09T15:33:19,452 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data10/current/BP-1105912312-172.17.0.3-1733758385755/current, will proceed with Du for space computation calculation,
2024-12-09T15:33:19,484 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T15:33:19,495 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ac03a3e7781a74f with lease ID 0xf7a85b8dadcc3b3a: Processing first storage report for DS-6db57b63-4d25-4982-ace7-66fbd3b8a303 from datanode DatanodeRegistration(127.0.0.1:33039, datanodeUuid=0410f0d7-2362-4f2f-a8ee-4590c1ee3331, infoPort=45969, infoSecurePort=0, ipcPort=37165, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755)
2024-12-09T15:33:19,495 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ac03a3e7781a74f with lease ID 0xf7a85b8dadcc3b3a: from storage DS-6db57b63-4d25-4982-ace7-66fbd3b8a303 node DatanodeRegistration(127.0.0.1:33039, datanodeUuid=0410f0d7-2362-4f2f-a8ee-4590c1ee3331, infoPort=45969, infoSecurePort=0, ipcPort=37165, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:33:19,496 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ac03a3e7781a74f with lease ID 0xf7a85b8dadcc3b3a: Processing first storage report for DS-ba604a44-e127-4320-89c9-81008fa11683 from datanode DatanodeRegistration(127.0.0.1:33039, datanodeUuid=0410f0d7-2362-4f2f-a8ee-4590c1ee3331, infoPort=45969, infoSecurePort=0, ipcPort=37165, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755)
2024-12-09T15:33:19,496 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ac03a3e7781a74f with lease ID 0xf7a85b8dadcc3b3a: from storage DS-ba604a44-e127-4320-89c9-81008fa11683 node DatanodeRegistration(127.0.0.1:33039, datanodeUuid=0410f0d7-2362-4f2f-a8ee-4590c1ee3331, infoPort=45969, infoSecurePort=0, ipcPort=37165, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:33:19,547 WARN [ResponseProcessor for block BP-1105912312-172.17.0.3-1733758385755:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1105912312-172.17.0.3-1733758385755:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:19,548 WARN [DataStreamer for file /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 block BP-1105912312-172.17.0.3-1733758385755:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:19,548 WARN [ResponseProcessor for block BP-1105912312-172.17.0.3-1733758385755:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1105912312-172.17.0.3-1733758385755:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:19,549 WARN [DataStreamer for file /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 block BP-1105912312-172.17.0.3-1733758385755:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:19,549 WARN [ResponseProcessor for block BP-1105912312-172.17.0.3-1733758385755:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1105912312-172.17.0.3-1733758385755:blk_1073741837_1013
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:19,549 WARN [DataStreamer for file /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 block BP-1105912312-172.17.0.3-1733758385755:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
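The "datanode 0 ... is bad" recovery warnings above show the HDFS client dropping a dead pipeline member for the three open WAL files. How aggressively the client replaces a failed datanode (versus continuing with the survivors) is governed by the standard replace-datanode-on-failure client settings; a sketch of the relevant keys, which tests on tiny clusters often relax:

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySketch {
  public static Configuration relaxedReplacePolicy() {
    Configuration conf = new Configuration();
    // Keep pipeline recovery enabled, but don't fail a write outright when the
    // small test cluster has no spare datanode to swap into the pipeline.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "ALWAYS");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}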
2024-12-09T15:33:19,550 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2016511558_22 at /127.0.0.1:44522 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44522 dst: /127.0.0.1:39255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:19,550 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1828027370_22 at /127.0.0.1:58516 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58516 dst: /127.0.0.1:39255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T15:33:19,550 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1828027370_22 at /127.0.0.1:34156 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44637:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34156 dst: /127.0.0.1:44637 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:19,552 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:44562 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44562 dst: /127.0.0.1:39255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:19,552 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:55716 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44637:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55716 dst: /127.0.0.1:44637 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T15:33:19,551 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2016511558_22 at /127.0.0.1:55688 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44637:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55688 dst: /127.0.0.1:44637 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:19,554 WARN [ResponseProcessor for block BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T15:33:19,555 WARN [DataStreamer for file /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta block BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad. 2024-12-09T15:33:19,556 WARN [PacketResponder: BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44637] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:19,557 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:55722 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44637:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55722 dst: /127.0.0.1:44637 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:19,556 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:44574 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44574 dst: /127.0.0.1:39255 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:19,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47d1c4f2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:33:19,580 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e10767c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T15:33:19,581 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T15:33:19,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b9ab422{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T15:33:19,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45dc61f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,STOPPED} 2024-12-09T15:33:19,583 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T15:33:19,583 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T15:33:19,583 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T15:33:19,583 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1105912312-172.17.0.3-1733758385755 (Datanode Uuid 5682c3b3-0aeb-4fcb-94ae-68e4c762f6bd) service to localhost/127.0.0.1:38871 2024-12-09T15:33:19,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data3/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:33:19,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data4/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:33:19,585 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T15:33:19,585 WARN [DataStreamer for file /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta block BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:19,590 WARN [DataStreamer for file /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 block BP-1105912312-172.17.0.3-1733758385755:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:19,591 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@3d778a79 {}] datanode.DataXceiver(331): 127.0.0.1:39255:DataXceiver error processing unknown operation src: /127.0.0.1:44862 dst: /127.0.0.1:39255 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:19,591 WARN [ResponseProcessor for block BP-1105912312-172.17.0.3-1733758385755:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1105912312-172.17.0.3-1733758385755:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:19,591 WARN [DataStreamer for file /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 block BP-1105912312-172.17.0.3-1733758385755:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:19,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2016511558_22 at /127.0.0.1:44846 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44846 dst: /127.0.0.1:39255 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:19,608 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@200d0f88{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:33:19,608 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4743e1dd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T15:33:19,608 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T15:33:19,608 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@616f4c13{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T15:33:19,608 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2693ea96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,STOPPED} 2024-12-09T15:33:19,610 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T15:33:19,610 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
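At this point the block pool services of the stopped datanodes are shutting down, which is the fault this test injects. A sketch of the kill-and-recover pattern, assuming a MiniDFSCluster handle (illustrative, not the test's literal code); the lease-recovery loop mirrors what the RecoverLeaseFSUtils lines further down perform via org.apache.hadoop.fs.LeaseRecoverable.recoverLease():

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  public static void killDataNodeAndRecoverLease(MiniDFSCluster cluster, Path walFile)
      throws Exception {
    // Simulate datanode death while a WAL writer still holds the file open.
    cluster.stopDataNode(0);

    // Lease recovery is asynchronous on the namenode side; poll until the
    // namenode has closed the file (recoverLease returns true).
    DistributedFileSystem dfs = cluster.getFileSystem();
    while (!dfs.recoverLease(walFile)) {
      Thread.sleep(1000);
    }
  }
}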
2024-12-09T15:33:19,610 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1105912312-172.17.0.3-1733758385755 (Datanode Uuid f6d4c26d-7046-48fd-a2a7-59cbe335c559) service to localhost/127.0.0.1:38871 2024-12-09T15:33:19,610 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T15:33:19,611 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data1/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:33:19,611 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data2/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:33:19,611 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T15:33:19,617 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7., hostname=3651d8ead0f3,34623,1733758386874, seqNum=2] 2024-12-09T15:33:19,618 ERROR [FSHLog-0-hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647-prefix:3651d8ead0f3,34623,1733758386874 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:19,619 WARN [FSHLog-0-hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647-prefix:3651d8ead0f3,34623,1733758386874 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:19,619 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:19,619 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C34623%2C1733758386874:(num 1733758387817) roll requested 2024-12-09T15:33:19,620 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C34623%2C1733758386874.1733758399619 2024-12-09T15:33:19,629 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:19,629 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:19,629 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:19,630 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:19,630 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:19,630 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758399619 2024-12-09T15:33:19,630 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:19,631 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:19,632 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-09T15:33:19,632 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-09T15:33:19,632 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:33:19,634 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45969:45969),(127.0.0.1/127.0.0.1:35101:35101)]
2024-12-09T15:33:19,635 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 is not closed yet, will try archiving it next time
2024-12-09T15:33:19,637 WARN [IPC Server handler 4 on default port 38871 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009
2024-12-09T15:33:19,642 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 after 7ms
2024-12-09T15:33:20,672 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:21,133 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:21,637 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758399619
2024-12-09T15:33:21,638 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:21,638 WARN [ResponseProcessor for block BP-1105912312-172.17.0.3-1733758385755:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1105912312-172.17.0.3-1733758385755:blk_1073741838_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:21,639 WARN [DataStreamer for file /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758399619 block BP-1105912312-172.17.0.3-1733758385755:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:21,639 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:55652 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55652 dst: /127.0.0.1:33039
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:21,640 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:41172 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:42305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41172 dst: /127.0.0.1:42305
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:21,655 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69bcca{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:33:21,656 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3247fd57{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:33:21,656 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:33:21,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fe58b15{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:33:21,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@687696f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,STOPPED}
2024-12-09T15:33:21,658 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:33:21,658 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:33:21,658 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1105912312-172.17.0.3-1733758385755 (Datanode Uuid 0410f0d7-2362-4f2f-a8ee-4590c1ee3331) service to localhost/127.0.0.1:38871
2024-12-09T15:33:21,658 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:33:21,659 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data9/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:33:21,659 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data10/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:33:21,659 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:33:22,672 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:23,133 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:23,638 WARN [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]
2024-12-09T15:33:23,638 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:23,639 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C34623%2C1733758386874:(num 1733758399619) roll requested
2024-12-09T15:33:23,639 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C34623%2C1733758386874.1733758403639
2024-12-09T15:33:23,642 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:23,642 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:23,642 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741839_1021
2024-12-09T15:33:23,643 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 after 4011ms
2024-12-09T15:33:23,645 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:23,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:23,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:23,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:23,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:23,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:23,651 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758399619 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758403639
2024-12-09T15:33:23,652 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35101:35101),(127.0.0.1/127.0.0.1:40877:40877)]
2024-12-09T15:33:23,652 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 is not closed yet, will try archiving it next time
2024-12-09T15:33:23,652 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758399619 is not closed yet, will try archiving it next time
2024-12-09T15:33:23,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42305 is added to blk_1073741838_1020 (size=2431)
2024-12-09T15:33:23,664 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-12-09T15:33:24,054 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 is not closed yet, will try archiving it next time
2024-12-09T15:33:24,105 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7623aa7b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42305, datanodeUuid=0acbca04-c925-4d50-922c-f719c8c4e637, infoPort=35101, infoSecurePort=0, ipcPort=36473, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755):Failed to transfer BP-1105912312-172.17.0.3-1733758385755:blk_1073741838_1020 to 127.0.0.1:44637 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:24,673 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:25,134 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:25,652 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:25,668 WARN [ResponseProcessor for block BP-1105912312-172.17.0.3-1733758385755:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1105912312-172.17.0.3-1733758385755:blk_1073741840_1022
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:25,668 WARN [DataStreamer for file /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758403639 block BP-1105912312-172.17.0.3-1733758385755:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:25,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:41178 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41178 dst: /127.0.0.1:42305
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:25,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60710 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60710 dst: /127.0.0.1:32937
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:25,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41559526{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:33:25,670 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@157a3fca{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:33:25,670 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:33:25,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6030d470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:33:25,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2af8b745{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,STOPPED}
2024-12-09T15:33:25,672 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:33:25,672 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:33:25,672 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1105912312-172.17.0.3-1733758385755 (Datanode Uuid 0acbca04-c925-4d50-922c-f719c8c4e637) service to localhost/127.0.0.1:38871
2024-12-09T15:33:25,672 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:33:25,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data5/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:33:25,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data6/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:33:25,673 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:33:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34623 {}] regionserver.HRegion(8855): Flush requested on ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:25,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea79454c5caa6299cd457a756da08be7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T15:33:25,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/cc6aa432e83c47279576bbf15451b7ac is 1080, key is row0002/info:/1733758401661/Put/seqid=0
2024-12-09T15:33:25,703 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42305
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:25,703 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60716 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741841_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741841_1024 to mirror 127.0.0.1:42305
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:25,703 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:25,703 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741841_1024
2024-12-09T15:33:25,703 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60716 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741841_1024] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T15:33:25,703 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60716 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741841_1024] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60716 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:25,704 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:25,705 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:25,705 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:25,705 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741842_1025
2024-12-09T15:33:25,706 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:25,707 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:25,707 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:25,707 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741843_1026
2024-12-09T15:33:25,708 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:25,709 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:25,709 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:25,709 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741844_1027
2024-12-09T15:33:25,709 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:25,710 WARN [IPC Server handler 4 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T15:33:25,710 WARN [IPC Server handler 4 on default port 38871 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T15:33:25,710 WARN [IPC Server handler 4 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T15:33:25,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741845_1028 (size=10347)
2024-12-09T15:33:26,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/cc6aa432e83c47279576bbf15451b7ac
2024-12-09T15:33:26,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/cc6aa432e83c47279576bbf15451b7ac as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/cc6aa432e83c47279576bbf15451b7ac
2024-12-09T15:33:26,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/cc6aa432e83c47279576bbf15451b7ac, entries=5, sequenceid=11, filesize=10.1 K
2024-12-09T15:33:26,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for ea79454c5caa6299cd457a756da08be7 in 449ms, sequenceid=11, compaction requested=false
2024-12-09T15:33:26,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea79454c5caa6299cd457a756da08be7:
2024-12-09T15:33:26,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34623 {}] regionserver.HRegion(8855): Flush requested on ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:26,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea79454c5caa6299cd457a756da08be7 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB
2024-12-09T15:33:26,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/f672a6def6684217a2ba43fd0b551f1d is 1080, key is row0007/info:/1733758405682/Put/seqid=0
2024-12-09T15:33:26,310 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:26,310 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:26,310 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741846_1029
2024-12-09T15:33:26,311 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:26,313 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:26,313 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:26,313 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741847_1030
2024-12-09T15:33:26,314 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:26,315 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:26,315 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:26,315 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741848_1031
2024-12-09T15:33:26,316 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:26,317 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:26,317 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:26,317 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741849_1032
2024-12-09T15:33:26,317 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:26,318 WARN [IPC Server handler 0 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T15:33:26,318 WARN [IPC Server handler 0 on default port 38871 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T15:33:26,318 WARN [IPC Server handler 0 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T15:33:26,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741850_1033 (size=12506)
2024-12-09T15:33:26,673 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:26,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/f672a6def6684217a2ba43fd0b551f1d
2024-12-09T15:33:26,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/f672a6def6684217a2ba43fd0b551f1d as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/f672a6def6684217a2ba43fd0b551f1d
2024-12-09T15:33:26,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/f672a6def6684217a2ba43fd0b551f1d, entries=7, sequenceid=24, filesize=12.2 K
2024-12-09T15:33:26,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for ea79454c5caa6299cd457a756da08be7 in 433ms, sequenceid=24, compaction requested=false
2024-12-09T15:33:26,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea79454c5caa6299cd457a756da08be7:
2024-12-09T15:33:26,737 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K
2024-12-09T15:33:26,737 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:26,737 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/f672a6def6684217a2ba43fd0b551f1d because midkey is the same as first or last row
2024-12-09T15:33:27,134 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,301 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@719fe313[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755):Failed to transfer BP-1105912312-172.17.0.3-1733758385755:blk_1073741845_1028 to 127.0.0.1:39255 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,301 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7f580350[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755):Failed to transfer BP-1105912312-172.17.0.3-1733758385755:blk_1073741850_1033 to 127.0.0.1:33039 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,653 WARN [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]
2024-12-09T15:33:27,653 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,653 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C34623%2C1733758386874:(num 1733758403639) roll requested
2024-12-09T15:33:27,654 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C34623%2C1733758386874.1733758407653
2024-12-09T15:33:27,657 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,657 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:27,657 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741851_1034
2024-12-09T15:33:27,658 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:27,660 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39255
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,660 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60756 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741852_1035 to mirror 127.0.0.1:39255
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,661 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:27,661 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741852_1035
2024-12-09T15:33:27,661 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60756 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-09T15:33:27,661 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60756 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60756 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,661 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:27,662 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,662 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:27,662 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741853_1036
2024-12-09T15:33:27,663 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:27,665 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42305
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,665 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60768 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741854_1037 to mirror 127.0.0.1:42305
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,665 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:27,665 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741854_1037
2024-12-09T15:33:27,665 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60768 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-09T15:33:27,665 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60768 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60768 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,666 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:27,666 WARN [IPC Server handler 2 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T15:33:27,666 WARN [IPC Server handler 2 on default port 38871 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T15:33:27,667 WARN [IPC Server handler 2 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T15:33:27,669 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:27,669 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:27,669 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:27,670 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:27,670 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:27,670 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758403639 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758407653
2024-12-09T15:33:27,671 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40877:40877)]
2024-12-09T15:33:27,671 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 is not closed yet, will try archiving it next time
2024-12-09T15:33:27,671 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758403639 is not closed yet, will try archiving it next time
2024-12-09T15:33:27,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741840_1023 (size=25992)
2024-12-09T15:33:27,673 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758399619 to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs/3651d8ead0f3%2C34623%2C1733758386874.1733758399619
2024-12-09T15:33:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34623 {}] regionserver.HRegion(8855): Flush requested on ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:27,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea79454c5caa6299cd457a756da08be7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-12-09T15:33:27,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/905b30b848db488e88414e217b93ab46 is 1079, key is tmprow/info:/1733758407723/Put/seqid=0
2024-12-09T15:33:27,732 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,732 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:27,732 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741856_1039
2024-12-09T15:33:27,733 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:27,734 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,735 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:27,735 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741857_1040
2024-12-09T15:33:27,735 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:27,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60784 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741858_1041 to mirror 127.0.0.1:44637
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,738 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44637
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,738 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:27,738 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60784 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T15:33:27,738 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741858_1041
2024-12-09T15:33:27,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60784 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60784 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,739 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:27,741 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60788 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741859_1042 to mirror 127.0.0.1:33039
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,741 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33039
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:27,741 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60788 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T15:33:27,741 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:27,741 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741859_1042
2024-12-09T15:33:27,741 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:60788 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60788 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:27,742 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:27,743 WARN [IPC Server handler 1 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T15:33:27,743 WARN [IPC Server handler 1 on default port 38871 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T15:33:27,743 WARN [IPC Server handler 1 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T15:33:27,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741860_1043 (size=6027)
2024-12-09T15:33:28,073 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 is not closed yet, will try archiving it next time
2024-12-09T15:33:28,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/905b30b848db488e88414e217b93ab46
2024-12-09T15:33:28,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/905b30b848db488e88414e217b93ab46 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/905b30b848db488e88414e217b93ab46
2024-12-09T15:33:28,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/905b30b848db488e88414e217b93ab46, entries=1, sequenceid=34, filesize=5.9 K
2024-12-09T15:33:28,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ea79454c5caa6299cd457a756da08be7 in 438ms, sequenceid=34, compaction requested=true
2024-12-09T15:33:28,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea79454c5caa6299cd457a756da08be7:
2024-12-09T15:33:28,163 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K
2024-12-09T15:33:28,163 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:28,163 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/f672a6def6684217a2ba43fd0b551f1d because midkey is the same as first or last row
2024-12-09T15:33:28,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ea79454c5caa6299cd457a756da08be7:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T15:33:28,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:33:28,163 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T15:33:28,165 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T15:33:28,165 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HStore(1541): ea79454c5caa6299cd457a756da08be7/info is initiating minor compaction (all files)
2024-12-09T15:33:28,165 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ea79454c5caa6299cd457a756da08be7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.
2024-12-09T15:33:28,165 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/cc6aa432e83c47279576bbf15451b7ac, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/f672a6def6684217a2ba43fd0b551f1d, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/905b30b848db488e88414e217b93ab46] into tmpdir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp, totalSize=28.2 K
2024-12-09T15:33:28,166 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc6aa432e83c47279576bbf15451b7ac, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733758401661
2024-12-09T15:33:28,166 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.Compactor(225): Compacting f672a6def6684217a2ba43fd0b551f1d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733758405682
2024-12-09T15:33:28,167 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.Compactor(225): Compacting 905b30b848db488e88414e217b93ab46, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733758407723
2024-12-09T15:33:28,183 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ea79454c5caa6299cd457a756da08be7#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T15:33:28,184 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/0f870e384f614fb489d97ea8126333a2 is 1080, key is row0002/info:/1733758401661/Put/seqid=0
2024-12-09T15:33:28,186 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:28,186 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:28,186 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741861_1044
2024-12-09T15:33:28,187 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:28,188 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:28,188 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:28,188 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741862_1045
2024-12-09T15:33:28,189 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:28,190 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:28,190 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:28,190 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741863_1046
2024-12-09T15:33:28,191 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:28,192 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:28,192 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:28,192 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741864_1047
2024-12-09T15:33:28,193 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:28,193 WARN [IPC Server handler 0 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T15:33:28,193 WARN [IPC Server handler 0 on default port 38871 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T15:33:28,193 WARN [IPC Server handler 0 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T15:33:28,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741865_1048 (size=17994)
2024-12-09T15:33:28,604 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/0f870e384f614fb489d97ea8126333a2 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2
2024-12-09T15:33:28,611 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ea79454c5caa6299cd457a756da08be7/info of ea79454c5caa6299cd457a756da08be7 into 0f870e384f614fb489d97ea8126333a2(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ea79454c5caa6299cd457a756da08be7:
2024-12-09T15:33:28,612 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7., storeName=ea79454c5caa6299cd457a756da08be7/info, priority=13, startTime=1733758408163; duration=0sec
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2 because midkey is the same as first or last row
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2 because midkey is the same as first or last row
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2 because midkey is the same as first or last row
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:33:28,612 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ea79454c5caa6299cd457a756da08be7:info
2024-12-09T15:33:28,673 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,134 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34623 {}] regionserver.HRegion(8855): Flush requested on ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:29,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea79454c5caa6299cd457a756da08be7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-12-09T15:33:29,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/7876cd6aae2346598b4e1b76f0773a45 is 1079, key is tmprow/info:/1733758409144/Put/seqid=0
2024-12-09T15:33:29,151 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,151 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:29,151 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741866_1049
2024-12-09T15:33:29,152 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:29,153 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,153 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:29,153 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741867_1050
2024-12-09T15:33:29,153 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:29,155 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44637
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,155 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48898 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741868_1051 to mirror 127.0.0.1:44637
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:29,155 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:29,155 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741868_1051
2024-12-09T15:33:29,155 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48898 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T15:33:29,156 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48898 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48898 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:29,156 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:29,158 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39255
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48906 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741869_1052 to mirror 127.0.0.1:39255
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:29,158 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:29,158 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741869_1052
2024-12-09T15:33:29,158 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48906 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T15:33:29,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48906 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48906 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:29,158 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:29,159 WARN [IPC Server handler 3 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T15:33:29,159 WARN [IPC Server handler 3 on default port 38871 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T15:33:29,159 WARN [IPC Server handler 3 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T15:33:29,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741870_1053 (size=6027)
2024-12-09T15:33:29,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/7876cd6aae2346598b4e1b76f0773a45
2024-12-09T15:33:29,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/7876cd6aae2346598b4e1b76f0773a45 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/7876cd6aae2346598b4e1b76f0773a45
2024-12-09T15:33:29,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/7876cd6aae2346598b4e1b76f0773a45, entries=1, sequenceid=45, filesize=5.9 K
2024-12-09T15:33:29,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ea79454c5caa6299cd457a756da08be7 in 432ms, sequenceid=45, compaction requested=false
2024-12-09T15:33:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea79454c5caa6299cd457a756da08be7:
2024-12-09T15:33:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K
2024-12-09T15:33:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2 because midkey is the same as first or last row
2024-12-09T15:33:29,673 WARN [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]
2024-12-09T15:33:29,673 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,673 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C34623%2C1733758386874:(num 1733758407653) roll requested
2024-12-09T15:33:29,673 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C34623%2C1733758386874.1733758409673
2024-12-09T15:33:29,678 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42305
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,678 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48916 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741871_1054 to mirror 127.0.0.1:42305
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:29,678 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:29,678 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741871_1054
2024-12-09T15:33:29,678 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48916 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-09T15:33:29,678 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48916 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48916 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:29,679 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:29,680 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,681 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:29,681 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741872_1055
2024-12-09T15:33:29,681 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:29,683 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,683 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:29,683 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741873_1056
2024-12-09T15:33:29,683 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:29,685 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:29,685 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:29,685 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741874_1057
2024-12-09T15:33:29,686 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:29,686 WARN [IPC Server handler 3 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T15:33:29,686 WARN [IPC Server handler 3 on default port 38871 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T15:33:29,687 WARN [IPC Server handler 3 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T15:33:29,689 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:29,689 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:29,690 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:29,690 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:29,690 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:29,690 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758407653 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758409673
2024-12-09T15:33:29,691 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40877:40877)]
2024-12-09T15:33:29,691 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 is not closed yet, will try archiving it next time
2024-12-09T15:33:29,691 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758407653 is not closed yet, will try archiving it next time
2024-12-09T15:33:29,691 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758403639 to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs/3651d8ead0f3%2C34623%2C1733758386874.1733758403639
2024-12-09T15:33:29,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741855_1038 (size=13591)
2024-12-09T15:33:30,093 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 is not closed yet, will try archiving it next time
2024-12-09T15:33:30,302 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@719fe313[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755):Failed to transfer BP-1105912312-172.17.0.3-1733758385755:blk_1073741860_1043 to 127.0.0.1:33039 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:30,302 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7f580350[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755):Failed to transfer BP-1105912312-172.17.0.3-1733758385755:blk_1073741840_1023 to 127.0.0.1:33039 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34623 {}] regionserver.HRegion(8855): Flush requested on ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:30,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea79454c5caa6299cd457a756da08be7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-12-09T15:33:30,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/e20c069d69574550b5cd216c5e6599b3 is 1079, key is tmprow/info:/1733758410565/Put/seqid=0
2024-12-09T15:33:30,575 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:30,575 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:30,575 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741876_1059
2024-12-09T15:33:30,576 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:30,578 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:30,578 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:30,578 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741877_1060
2024-12-09T15:33:30,579 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:30,580 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:30,580 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:30,580 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741878_1061
2024-12-09T15:33:30,581 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:30,582 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:30,582 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:30,583 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741879_1062
2024-12-09T15:33:30,583 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:30,584 WARN [IPC Server handler 2 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T15:33:30,584 WARN [IPC Server handler 2 on default port 38871 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T15:33:30,584 WARN [IPC Server handler 2 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T15:33:30,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741880_1063 (size=6027)
2024-12-09T15:33:30,674 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:30,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/e20c069d69574550b5cd216c5e6599b3 2024-12-09T15:33:30,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/e20c069d69574550b5cd216c5e6599b3 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e20c069d69574550b5cd216c5e6599b3 2024-12-09T15:33:31,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e20c069d69574550b5cd216c5e6599b3, entries=1, sequenceid=55, filesize=5.9 K 2024-12-09T15:33:31,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ea79454c5caa6299cd457a756da08be7 in 453ms, sequenceid=55, compaction requested=true 2024-12-09T15:33:31,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea79454c5caa6299cd457a756da08be7: 2024-12-09T15:33:31,020 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-12-09T15:33:31,020 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T15:33:31,020 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2 because midkey is the same as first or last row 2024-12-09T15:33:31,021 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T15:33:31,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ea79454c5caa6299cd457a756da08be7:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T15:33:31,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:33:31,022 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T15:33:31,023 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HStore(1541): ea79454c5caa6299cd457a756da08be7/info is initiating minor compaction (all files) 2024-12-09T15:33:31,023 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
ea79454c5caa6299cd457a756da08be7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. 2024-12-09T15:33:31,023 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/7876cd6aae2346598b4e1b76f0773a45, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e20c069d69574550b5cd216c5e6599b3] into tmpdir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp, totalSize=29.3 K 2024-12-09T15:33:31,024 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f870e384f614fb489d97ea8126333a2, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733758401661 2024-12-09T15:33:31,024 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7876cd6aae2346598b4e1b76f0773a45, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733758409144 2024-12-09T15:33:31,025 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.Compactor(225): Compacting e20c069d69574550b5cd216c5e6599b3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733758410565 2024-12-09T15:33:31,055 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ea79454c5caa6299cd457a756da08be7#info#compaction#24 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T15:33:31,056 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/1f693822f74644f59aca67e31ed8267f is 1080, key is row0002/info:/1733758401661/Put/seqid=0 2024-12-09T15:33:31,058 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:31,058 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:31,058 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741881_1064
2024-12-09T15:33:31,059 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:31,060 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:31,061 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:31,061 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741882_1065
2024-12-09T15:33:31,061 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:31,064 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44637
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
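The createBlockOutputStream failures and "Excluding datanode" messages above show the DFS client cycling through replacement datanodes after the test killed pipeline members. How aggressively the client swaps failed nodes is governed by a small set of client-side settings; a minimal sketch follows, assuming a stock Hadoop 3.4 client (the values shown are illustrative, not settings read from this run).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch of the client-side pipeline-recovery knobs in Hadoop 3.x.
// Values here are illustrative assumptions, not the settings of this test run.
public class PipelineRecoveryConfigSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Replace a failed datanode in an open write pipeline at all.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT replaces only when the pipeline would drop below a safe size;
        // ALWAYS and NEVER are the other accepted values.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Keep writing with the surviving datanodes if no replacement is found,
        // rather than failing the stream outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        try (FileSystem fs = FileSystem.get(conf)) {
            fs.create(new Path("/tmp/pipeline-sketch")).close();
        }
    }
}
```

With best-effort enabled, a two-node cluster losing one datanode degrades to single-replica writes instead of the "All datanodes are bad" aborts seen later in this log.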
2024-12-09T15:33:31,064 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]) is bad.
2024-12-09T15:33:31,064 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741883_1066
2024-12-09T15:33:31,065 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44637,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]
2024-12-09T15:33:31,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48936 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741883_1066 to mirror 127.0.0.1:44637
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:31,066 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48936 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T15:33:31,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48936 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48936 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:31,072 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42305
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:31,072 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48950 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741884_1067 to mirror 127.0.0.1:42305
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:31,072 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:31,072 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741884_1067
2024-12-09T15:33:31,072 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48950 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T15:33:31,073 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48950 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48950 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
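Every "Connection refused at ...finishConnect" in these traces means a plain TCP connect to a dead datanode's transfer port failed outright: the mirror in the pipeline dialed the next node and nobody was listening. The diagnosis can be reproduced outside the test with a bare socket probe; the host and port below are placeholders echoing this log, not part of any harness.

```java
import java.net.InetSocketAddress;
import java.net.Socket;

// Minimal probe: a refused connect is exactly what DataStreamer and the
// mirroring DataXceiver see when they dial a datanode whose process is gone.
public class ConnectProbe {
    public static void main(String[] args) {
        try (Socket s = new Socket()) {
            // 44637 is one of the killed datanodes' ports in this log (placeholder here).
            s.connect(new InetSocketAddress("127.0.0.1", 44637), 3_000); // 3s timeout
            System.out.println("transfer port is reachable");
        } catch (java.io.IOException e) {
            System.out.println("unreachable: " + e.getMessage()); // "Connection refused"
        }
    }
}
```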
2024-12-09T15:33:31,073 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:31,075 WARN [IPC Server handler 1 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T15:33:31,075 WARN [IPC Server handler 1 on default port 38871 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T15:33:31,075 WARN [IPC Server handler 1 on default port 38871 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T15:33:31,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741885_1068 (size=18097)
2024-12-09T15:33:31,089 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/1f693822f74644f59aca67e31ed8267f as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f
2024-12-09T15:33:31,099 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ea79454c5caa6299cd457a756da08be7/info of ea79454c5caa6299cd457a756da08be7 into 1f693822f74644f59aca67e31ed8267f(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute.
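The BlockPlacementPolicyDefault warnings above say the namenode could not find a second usable DISK storage for a replication-2 block: the remaining datanodes were either dead or already excluded by the writer. A test (or an operator script) can anticipate these warnings by counting live datanodes before expecting full pipelines. The sketch below uses the public DistributedFileSystem API; the threshold of 2 is this test's WAL replication, stated in the warnings, and the rest is an illustrative assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

// Sketch: count live datanodes before asserting that replication-2 writes
// can be fully placed.
public class LiveDatanodeCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            DatanodeInfo[] live =
                dfs.getDataNodeStats(HdfsConstants.DatanodeReportType.LIVE);
            if (live.length < 2) { // 2 = replication factor targeted by this test
                System.out.println("only " + live.length
                    + " live datanode(s); expect placement warnings");
            }
        }
    }
}
```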
2024-12-09T15:33:31,099 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ea79454c5caa6299cd457a756da08be7:
2024-12-09T15:33:31,099 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7., storeName=ea79454c5caa6299cd457a756da08be7/info, priority=13, startTime=1733758411021; duration=0sec
2024-12-09T15:33:31,099 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-12-09T15:33:31,099 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:31,099 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f because midkey is the same as first or last row
2024-12-09T15:33:31,100 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-12-09T15:33:31,100 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:31,100 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f because midkey is the same as first or last row
2024-12-09T15:33:31,100 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-12-09T15:33:31,100 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:31,100 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f because midkey is the same as first or last row
2024-12-09T15:33:31,100 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:33:31,100 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ea79454c5caa6299cd457a756da08be7:info
2024-12-09T15:33:31,135 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:31,302 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@719fe313[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755):Failed to transfer BP-1105912312-172.17.0.3-1733758385755:blk_1073741870_1053 to 127.0.0.1:33039 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:31,302 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7f580350[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755):Failed to transfer BP-1105912312-172.17.0.3-1733758385755:blk_1073741865_1048 to 127.0.0.1:42305 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:31,691 WARN [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas.
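A few entries back, the split-policy check ran three times with the same outcome: ConstantSizeRegionSplitPolicy judged the store (17.7 K) big enough against sizeToCheck (16.0 K), but StoreUtils refused because the HFile's midkey equals its first or last row, so any split point it could offer would leave one daughter region empty. The gist of that guard, as a standalone sketch (plain byte-array equality stands in for HBase's cell comparator; the row values are illustrative):

```java
import java.util.Arrays;

// Sketch of the "cannot split ... because midkey is the same as first or last
// row" guard: a store file offers its middle block's first key as the split
// point, and splitting is pointless if that key equals the file's first or
// last row.
public class SplitPointGuard {
    static boolean canSplit(byte[] firstRow, byte[] midRow, byte[] lastRow) {
        return !Arrays.equals(midRow, firstRow) && !Arrays.equals(midRow, lastRow);
    }

    public static void main(String[] args) {
        byte[] first = "row0002".getBytes();
        byte[] mid   = "row0002".getBytes(); // one dominant row skews the midkey
        byte[] last  = "row0012".getBytes();
        System.out.println(canSplit(first, mid, last)); // false -> "cannot split"
    }
}
```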
2024-12-09T15:33:31,692 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:31,798 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:33:31,808 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:33:31,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:33:31,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:33:31,811 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:33:31,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61815e22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:33:31,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7177a9b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:33:31,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39eaf0e6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/java.io.tmpdir/jetty-localhost-45999-hadoop-hdfs-3_4_1-tests_jar-_-any-8610876766768737604/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:33:31,968 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1542e930{HTTP/1.1, (http/1.1)}{localhost:45999}
2024-12-09T15:33:31,968 INFO [Time-limited test {}] server.Server(415): Started @137620ms
2024-12-09T15:33:31,971 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T15:33:32,252 WARN [Thread-991 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-12-09T15:33:32,257 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeadd702a42d2f8ad with lease ID 0xf7a85b8dadcc3b3b: from storage DS-2f8f3119-b715-4d10-a31f-b2493621b334 node DatanodeRegistration(127.0.0.1:36667, datanodeUuid=5682c3b3-0aeb-4fcb-94ae-68e4c762f6bd, infoPort=33275, infoSecurePort=0, ipcPort=41643, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T15:33:32,257 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeadd702a42d2f8ad with lease ID 0xf7a85b8dadcc3b3b: from storage DS-4b0840f3-828e-40c8-84de-b47d95ca562f node DatanodeRegistration(127.0.0.1:36667, datanodeUuid=5682c3b3-0aeb-4fcb-94ae-68e4c762f6bd, infoPort=33275, infoSecurePort=0, ipcPort=41643, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:33:32,674 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:33,135 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:33,302 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7f580350[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32937, datanodeUuid=046bf575-8dc6-4a60-8c58-4e0bc9aca94c, infoPort=40877, infoSecurePort=0, ipcPort=37523, storageInfo=lv=-57;cid=testClusterID;nsid=916608892;c=1733758385755):Failed to transfer BP-1105912312-172.17.0.3-1733758385755:blk_1073741855_1038 to 127.0.0.1:42305 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:33,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741880_1063 (size=6027)
2024-12-09T15:33:33,692 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:34,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741885_1068 (size=18097)
2024-12-09T15:33:34,675 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:35,136 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
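The identical "All datanodes ... are bad. Aborting..." stack repeats roughly once a second because both the master-store and region-server WAL rollers keep retrying a writer whose only remaining pipeline member is the killed datanode at 127.0.0.1:39255. Two region-server settings bound this behavior; the key names below are from memory of HBase 2.x/3.x defaults and should be verified against the running version before use.

```java
import org.apache.hadoop.conf.Configuration;

// Sketch: region-server settings that bound WAL-roll retries when the HDFS
// pipeline is degraded. Key names and defaults are recalled, not confirmed
// from this run; treat them as assumptions to verify.
public class WalRollTuningSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // How many consecutive roll/close errors are tolerated before the
        // region server gives up and aborts.
        conf.setInt("hbase.regionserver.logroll.errors.tolerated", 2);
        // How many consecutive low-replication roll requests are allowed
        // before FSHLog emits the "Too many consecutive RollWriter requests"
        // warning seen earlier in this log.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        System.out.println(conf.get("hbase.regionserver.logroll.errors.tolerated"));
    }
}
```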
2024-12-09T15:33:35,692 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:36,675 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:36,729 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-09T15:33:37,136 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:37,535 ERROR [FSHLog-0-hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData-prefix:3651d8ead0f3,43929,1733758386761 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:37,536 WARN [FSHLog-0-hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData-prefix:3651d8ead0f3,43929,1733758386761 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:37,542 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C43929%2C1733758386761:(num 1733758387210) roll requested
2024-12-09T15:33:37,543 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C43929%2C1733758386761.1733758417542
2024-12-09T15:33:37,560 WARN [Thread-1011 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33039
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:37,560 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2016511558_22 at /127.0.0.1:48426 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data4]'}, localName='127.0.0.1:36667', datanodeUuid='5682c3b3-0aeb-4fcb-94ae-68e4c762f6bd', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741886_1069 to mirror 127.0.0.1:33039
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:37,561 WARN [Thread-1011 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36667,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK], DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:37,561 WARN [Thread-1011 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741886_1069
2024-12-09T15:33:37,561 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2016511558_22 at /127.0.0.1:48426 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-09T15:33:37,561 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2016511558_22 at /127.0.0.1:48426 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:36667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48426 dst: /127.0.0.1:36667
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:37,563 WARN [Thread-1011 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:37,588 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:37,589 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:37,589 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:37,590 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:37,590 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:37,590 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758417542
2024-12-09T15:33:37,598 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:37,599 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:37,599 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:33:37,604 WARN [IPC Server handler 2 on default port 38871 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 has not been closed. Lease recovery is in progress.
RecoveryId = 1071 for block blk_1073741830_1014
2024-12-09T15:33:37,606 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 after 7ms
2024-12-09T15:33:37,607 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33275:33275),(127.0.0.1/127.0.0.1:40877:40877)]
2024-12-09T15:33:37,607 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 is not closed yet, will try archiving it next time
2024-12-09T15:33:37,693 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:38,676 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:39,693 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
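RecoverLeaseFSUtils above starts reclaiming the old master WAL: attempt=0 fails after 7ms because the namenode's lease recovery on blk_1073741830_1014 is still in flight, so the utility backs off and retries. The core of such a loop, sketched against the public DistributedFileSystem API (the path and the 4-second backoff are placeholders echoing this log, not the utility's actual schedule):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: poll recoverLease() until the namenode reports the file closed.
public class LeaseRecoveryLoop {
    public static void main(String[] args) throws Exception {
        try (FileSystem fs = FileSystem.get(new Configuration())) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            Path wal = new Path("/user/jenkins/old-wal"); // placeholder path
            boolean recovered = false;
            for (int attempt = 0; attempt < 5 && !recovered; attempt++) {
                recovered = dfs.recoverLease(wal); // true once the file is closed
                if (!recovered) Thread.sleep(4_000L); // back off, as the log shows
            }
            System.out.println("recovered=" + recovered);
        }
    }
}
```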
2024-12-09T15:33:40,676 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:41,608 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 after 4009ms
2024-12-09T15:33:41,694 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:42,677 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:43,694 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:44,678 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,124 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C34623%2C1733758386874.1733758425124
2024-12-09T15:33:45,143 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,143 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:36667,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:45,143 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741888_1072
2024-12-09T15:33:45,147 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:45,150 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,151 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:45,151 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741889_1073
2024-12-09T15:33:45,155 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:45,157 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,157 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:36667,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
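This burst of abandon-and-exclude rounds is the writer probing the remaining datanodes until it assembles a pipeline out of the survivors, which is precisely the failure mode a test named testLogRollOnDatanodeDeath injects. The usual harness pattern for that injection, sketched against the MiniDFSCluster test API (method names recalled from the Hadoop 3.x test jar; verify against the version in use):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Sketch of the failure-injection pattern: kill a datanode under an open WAL
// writer, let the client re-pipeline, then bring the node back.
public class DatanodeDeathSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
        try {
            cluster.waitActive();
            // Stop one datanode; writers with blocks on it must recover their
            // pipelines, producing warnings like the ones above.
            MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
            // ... exercise WAL writes and rolls here ...
            cluster.restartDataNode(dn); // restart, as this log's Jetty lines show
        } finally {
            cluster.shutdown();
        }
    }
}
```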
2024-12-09T15:33:45,157 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741890_1074
2024-12-09T15:33:45,158 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:45,166 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,167 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,167 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,167 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,167 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,167 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758409673 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758425124
2024-12-09T15:33:45,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741875_1058 (size=12911)
2024-12-09T15:33:45,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40877:40877),(127.0.0.1/127.0.0.1:33275:33275)]
2024-12-09T15:33:45,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 is not closed yet, will try archiving it next time
2024-12-09T15:33:45,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758409673 is not closed yet, will try archiving it next time
2024-12-09T15:33:45,172 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758407653 to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs/3651d8ead0f3%2C34623%2C1733758386874.1733758407653
2024-12-09T15:33:45,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34623 {}] regionserver.HRegion(8855): Flush requested on ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:45,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea79454c5caa6299cd457a756da08be7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-12-09T15:33:45,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/e1ff6db1beec4158911b36a4c7399910 is 1080, key is row0013/info:/1733758425174/Put/seqid=0
2024-12-09T15:33:45,194 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,194 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:45,194 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741892_1076
2024-12-09T15:33:45,195 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:45,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741893_1077 (size=8190)
2024-12-09T15:33:45,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741893_1077 (size=8190)
2024-12-09T15:33:45,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/e1ff6db1beec4158911b36a4c7399910
2024-12-09T15:33:45,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/e1ff6db1beec4158911b36a4c7399910 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e1ff6db1beec4158911b36a4c7399910
2024-12-09T15:33:45,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e1ff6db1beec4158911b36a4c7399910, entries=3, sequenceid=66, filesize=8.0 K
2024-12-09T15:33:45,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for ea79454c5caa6299cd457a756da08be7 in 63ms, sequenceid=66, compaction requested=false
2024-12-09T15:33:45,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea79454c5caa6299cd457a756da08be7:
2024-12-09T15:33:45,245 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K
2024-12-09T15:33:45,245 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:45,245 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f because midkey is the same as first or last row
2024-12-09T15:33:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34623 {}] regionserver.HRegion(8855): Flush requested on ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:45,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea79454c5caa6299cd457a756da08be7 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-12-09T15:33:45,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/e85585500e2849c28da6d8dce34a61ae is 1080, key is row0015/info:/1733758425184/Put/seqid=0
2024-12-09T15:33:45,500 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,500 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:45,500 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741894_1078
2024-12-09T15:33:45,503 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:45,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48702 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741895_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741895_1079 to mirror 127.0.0.1:39255
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:45,508 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48702 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741895_1079] {}] datanode.BlockReceiver(316): Block 1073741895 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T15:33:45,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48702 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741895_1079] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48702 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:45,510 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1079
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39255
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,511 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741895_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:45,511 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741895_1079
2024-12-09T15:33:45,512 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:45,575 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 is not closed yet, will try archiving it next time
2024-12-09T15:33:45,576 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758409673 to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs/3651d8ead0f3%2C34623%2C1733758386874.1733758409673
2024-12-09T15:33:45,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741896_1080 (size=14660)
2024-12-09T15:33:45,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/e85585500e2849c28da6d8dce34a61ae
2024-12-09T15:33:45,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741896_1080 (size=14660)
2024-12-09T15:33:45,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/e85585500e2849c28da6d8dce34a61ae as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e85585500e2849c28da6d8dce34a61ae
2024-12-09T15:33:45,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e85585500e2849c28da6d8dce34a61ae, entries=9, sequenceid=79, filesize=14.3 K
2024-12-09T15:33:45,696 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled.
2024-12-09T15:33:45,696 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=1.05 KB/1075 for ea79454c5caa6299cd457a756da08be7 in 243ms, sequenceid=79, compaction requested=true
2024-12-09T15:33:45,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea79454c5caa6299cd457a756da08be7:
2024-12-09T15:33:45,703 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K
2024-12-09T15:33:45,703 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:33:45,703 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f because midkey is the same as first or last row
2024-12-09T15:33:45,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ea79454c5caa6299cd457a756da08be7:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T15:33:45,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:33:45,703 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T15:33:45,719 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T15:33:45,720 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HStore(1541): ea79454c5caa6299cd457a756da08be7/info is initiating minor compaction (all files)
2024-12-09T15:33:45,720 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ea79454c5caa6299cd457a756da08be7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.
2024-12-09T15:33:45,720 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e1ff6db1beec4158911b36a4c7399910, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e85585500e2849c28da6d8dce34a61ae] into tmpdir=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp, totalSize=40.0 K
2024-12-09T15:33:45,724 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1f693822f74644f59aca67e31ed8267f, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733758401661
2024-12-09T15:33:45,724 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.Compactor(225): Compacting e1ff6db1beec4158911b36a4c7399910, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1733758411583
2024-12-09T15:33:45,725 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] compactions.Compactor(225): Compacting e85585500e2849c28da6d8dce34a61ae, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733758425184
2024-12-09T15:33:45,790 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ea79454c5caa6299cd457a756da08be7#info#compaction#27 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T15:33:45,791 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/9a3ee94eb51f450d9a7befb38bb4003b is 1080, key is row0002/info:/1733758401661/Put/seqid=0
2024-12-09T15:33:45,797 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1081
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,798 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741897_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK], DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:45,798 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741897_1081
2024-12-09T15:33:45,803 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:45,810 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,810 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:36667,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:45,810 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741898_1082
2024-12-09T15:33:45,818 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:45,832 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48714 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741899_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741899_1083 to mirror 127.0.0.1:33039
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:45,833 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48714 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741899_1083] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T15:33:45,833 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1083
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33039
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,833 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741899_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:45,833 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741899_1083
2024-12-09T15:33:45,834 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48714 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741899_1083] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48714 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:45,839 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:45,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T15:33:45,871 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T15:33:45,871 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:33:45,871 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:45,871 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:45,872 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T15:33:45,872 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=244177517, stopped=false
2024-12-09T15:33:45,872 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3651d8ead0f3,43929,1733758386761
2024-12-09T15:33:45,872 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T15:33:45,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45133-0x1012bd1124b0002, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:33:45,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45133-0x1012bd1124b0002, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:33:45,885 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:33:45,885 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T15:33:45,885 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:33:45,886 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:45,886 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3651d8ead0f3,34623,1733758386874' *****
2024-12-09T15:33:45,886 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T15:33:45,886 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3651d8ead0f3,45133,1733758388518' *****
2024-12-09T15:33:45,886 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T15:33:45,886 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T15:33:45,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:33:45,886 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T15:33:45,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:33:45,895 INFO [RS:1;3651d8ead0f3:45133 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T15:33:45,895 INFO [RS:1;3651d8ead0f3:45133 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T15:33:45,895 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(959): stopping server 3651d8ead0f3,45133,1733758388518
2024-12-09T15:33:45,895 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:33:45,895 INFO [RS:1;3651d8ead0f3:45133 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3651d8ead0f3:45133.
2024-12-09T15:33:45,895 DEBUG [RS:1;3651d8ead0f3:45133 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:33:45,896 DEBUG [RS:1;3651d8ead0f3:45133 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:45,896 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(976): stopping server 3651d8ead0f3,45133,1733758388518; all regions closed.
2024-12-09T15:33:45,896 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T15:33:45,896 INFO [RS:0;3651d8ead0f3:34623 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T15:33:45,897 INFO [RS:0;3651d8ead0f3:34623 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T15:33:45,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:33:45,897 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(3091): Received CLOSE for ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:45,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:33:45,897 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T15:33:45,897 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:33:45,903 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45133-0x1012bd1124b0002, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:33:45,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741900_1084 (size=28989)
2024-12-09T15:33:45,907 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,907 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,907 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,910 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,910 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:45,911 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(959): stopping server 3651d8ead0f3,34623,1733758386874
2024-12-09T15:33:45,911 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:33:45,911 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:33:45,911 INFO [RS:0;3651d8ead0f3:34623 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3651d8ead0f3:34623.
2024-12-09T15:33:45,911 DEBUG [RS:0;3651d8ead0f3:34623 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:33:45,911 DEBUG [RS:0;3651d8ead0f3:34623 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:45,911 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T15:33:45,911 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T15:33:45,915 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ea79454c5caa6299cd457a756da08be7, disabling compactions & flushes
2024-12-09T15:33:45,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741900_1084 (size=28989)
2024-12-09T15:33:45,915 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1993): waiting for 1 compactions to complete for region TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.
2024-12-09T15:33:45,920 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,920 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,920 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:33:45,922 WARN [IPC Server handler 0 on default port 38871 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 has not been closed. Lease recovery is in progress. RecoveryId = 1085 for block blk_1073741837_1013
2024-12-09T15:33:45,923 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 after 3ms
2024-12-09T15:33:45,925 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/9a3ee94eb51f450d9a7befb38bb4003b as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/9a3ee94eb51f450d9a7befb38bb4003b
2024-12-09T15:33:45,947 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ea79454c5caa6299cd457a756da08be7/info of ea79454c5caa6299cd457a756da08be7 into 9a3ee94eb51f450d9a7befb38bb4003b(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T15:33:45,947 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ea79454c5caa6299cd457a756da08be7:
2024-12-09T15:33:45,947 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.
2024-12-09T15:33:45,947 INFO [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7., storeName=ea79454c5caa6299cd457a756da08be7/info, priority=13, startTime=1733758425703; duration=0sec
2024-12-09T15:33:45,948 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.
2024-12-09T15:33:45,948 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:33:45,948 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. after waiting 0 ms
2024-12-09T15:33:45,948 DEBUG [RS:0;3651d8ead0f3:34623-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ea79454c5caa6299cd457a756da08be7:info
2024-12-09T15:33:45,948 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.
2024-12-09T15:33:45,948 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ea79454c5caa6299cd457a756da08be7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T15:33:45,950 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T15:33:45,950 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T15:33:45,951 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-12-09T15:33:45,951 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1325): Online Regions={ea79454c5caa6299cd457a756da08be7=TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7., 1588230740=hbase:meta,,1.1588230740}
2024-12-09T15:33:45,951 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:45,951 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T15:33:45,952 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T15:33:45,952 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T15:33:45,952 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T15:33:45,952 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T15:33:45,952 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB
2024-12-09T15:33:45,954 ERROR [FSHLog-0-hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647-prefix:3651d8ead0f3,34623,1733758386874.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:45,955 WARN [FSHLog-0-hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647-prefix:3651d8ead0f3,34623,1733758386874.meta {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:45,955 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C34623%2C1733758386874.meta:.meta(num 1733758388267) roll requested 2024-12-09T15:33:45,955 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C34623%2C1733758386874.meta.1733758425955.meta 2024-12-09T15:33:45,980 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/3b9ecf8a3da44208be98150e605afe84 is 1079, key is tmprow/info:/1733758425659/Put/seqid=0 2024-12-09T15:33:45,987 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:45,987 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741901_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad. 2024-12-09T15:33:45,987 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741901_1086 2024-12-09T15:33:45,990 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK] 2024-12-09T15:33:45,996 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39255 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:33:45,995 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48738 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741902_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741902_1087 to mirror 127.0.0.1:39255 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:45,996 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741902_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad. 2024-12-09T15:33:45,996 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741902_1087 2024-12-09T15:33:45,996 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48738 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741902_1087] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T15:33:45,996 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48738 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741902_1087] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48738 dst: /127.0.0.1:32937 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:46,003 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK] 2024-12-09T15:33:46,005 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48744 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741903_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8]'}, localName='127.0.0.1:32937', datanodeUuid='046bf575-8dc6-4a60-8c58-4e0bc9aca94c', xmitsInProgress=0}:Exception transferring block BP-1105912312-172.17.0.3-1733758385755:blk_1073741903_1088 to mirror 127.0.0.1:39255 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:33:46,005 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48744 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741903_1088] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-09T15:33:46,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1655661553_22 at /127.0.0.1:48744 [Receiving block BP-1105912312-172.17.0.3-1733758385755:blk_1073741903_1088] {}] datanode.DataXceiver(331): 127.0.0.1:32937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48744 dst: /127.0.0.1:32937
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:33:46,006 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1088
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39255
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:46,007 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741903_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK], DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]) is bad.
2024-12-09T15:33:46,007 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741903_1088
2024-12-09T15:33:46,013 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1089
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:46,013 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741904_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK], DatanodeInfoWithStorage[127.0.0.1:36667,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]) is bad.
2024-12-09T15:33:46,013 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741904_1089
2024-12-09T15:33:46,015 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33039,DS-6db57b63-4d25-4982-ace7-66fbd3b8a303,DISK]
2024-12-09T15:33:46,015 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]
2024-12-09T15:33:46,018 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1091
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:46,018 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741906_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:32937,DS-60a1baeb-3dc9-4b66-b3c2-48a229a00c1d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:46,018 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741906_1091
2024-12-09T15:33:46,021 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1090
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:46,021 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1105912312-172.17.0.3-1733758385755:blk_1073741905_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK], DatanodeInfoWithStorage[127.0.0.1:36667,DS-2f8f3119-b715-4d10-a31f-b2493621b334,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]) is bad.
2024-12-09T15:33:46,021 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-1105912312-172.17.0.3-1733758385755:blk_1073741905_1090
2024-12-09T15:33:46,022 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:46,030 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42305,DS-0e61e7bf-bace-4da8-8ebd-957535115adc,DISK]
2024-12-09T15:33:46,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741907_1092 (size=6027)
2024-12-09T15:33:46,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741907_1092 (size=6027)
2024-12-09T15:33:46,092 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/3b9ecf8a3da44208be98150e605afe84
2024-12-09T15:33:46,103 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 131 ms, current pipeline: null
2024-12-09T15:33:46,103 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,104 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,114 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,117 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,120 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,121 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758425955.meta
2024-12-09T15:33:46,126 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:46,127 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39255,DS-0bfae810-8754-461c-a55d-3954b9a63e73,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:33:46,127 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:33:46,129 WARN [IPC Server handler 4 on default port 38871 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta has not been closed. Lease recovery is in progress. RecoveryId = 1094 for block blk_1073741834_1010
2024-12-09T15:33:46,130 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/.tmp/info/3b9ecf8a3da44208be98150e605afe84 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/3b9ecf8a3da44208be98150e605afe84
2024-12-09T15:33:46,133 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta after 6ms
2024-12-09T15:33:46,142 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/3b9ecf8a3da44208be98150e605afe84, entries=1, sequenceid=84, filesize=5.9 K
2024-12-09T15:33:46,146 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ea79454c5caa6299cd457a756da08be7 in 197ms, sequenceid=84, compaction requested=false
2024-12-09T15:33:46,150 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33275:33275),(127.0.0.1/127.0.0.1:40877:40877)]
2024-12-09T15:33:46,151 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta is not closed yet, will try archiving it next time
2024-12-09T15:33:46,151 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/cc6aa432e83c47279576bbf15451b7ac, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/f672a6def6684217a2ba43fd0b551f1d, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/905b30b848db488e88414e217b93ab46, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/7876cd6aae2346598b4e1b76f0773a45, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e20c069d69574550b5cd216c5e6599b3, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e1ff6db1beec4158911b36a4c7399910, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e85585500e2849c28da6d8dce34a61ae] to archive
2024-12-09T15:33:46,153 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-09T15:33:46,153 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ea79454c5caa6299cd457a756da08be7
2024-12-09T15:33:46,168 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/cc6aa432e83c47279576bbf15451b7ac to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/cc6aa432e83c47279576bbf15451b7ac
2024-12-09T15:33:46,182 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/f672a6def6684217a2ba43fd0b551f1d to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/f672a6def6684217a2ba43fd0b551f1d
2024-12-09T15:33:46,192 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2 to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/0f870e384f614fb489d97ea8126333a2
2024-12-09T15:33:46,195 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/905b30b848db488e88414e217b93ab46 to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/905b30b848db488e88414e217b93ab46
2024-12-09T15:33:46,203 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/7876cd6aae2346598b4e1b76f0773a45 to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/7876cd6aae2346598b4e1b76f0773a45
2024-12-09T15:33:46,206 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/.tmp/info/f238907fbe274caeaefa237c1f250219 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7./info:regioninfo/1733758389139/Put/seqid=0
2024-12-09T15:33:46,226 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/1f693822f74644f59aca67e31ed8267f
2024-12-09T15:33:46,242 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e20c069d69574550b5cd216c5e6599b3 to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e20c069d69574550b5cd216c5e6599b3
2024-12-09T15:33:46,253 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e1ff6db1beec4158911b36a4c7399910 to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e1ff6db1beec4158911b36a4c7399910
2024-12-09T15:33:46,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741909_1095 (size=7089)
2024-12-09T15:33:46,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741909_1095 (size=7089)
2024-12-09T15:33:46,256 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e85585500e2849c28da6d8dce34a61ae to hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/info/e85585500e2849c28da6d8dce34a61ae
2024-12-09T15:33:46,256 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3651d8ead0f3:43929 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-12-09T15:33:46,257 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cc6aa432e83c47279576bbf15451b7ac=10347, f672a6def6684217a2ba43fd0b551f1d=12506, 0f870e384f614fb489d97ea8126333a2=17994, 905b30b848db488e88414e217b93ab46=6027, 7876cd6aae2346598b4e1b76f0773a45=6027, 1f693822f74644f59aca67e31ed8267f=18097, e20c069d69574550b5cd216c5e6599b3=6027, e1ff6db1beec4158911b36a4c7399910=8190, e85585500e2849c28da6d8dce34a61ae=14660]
2024-12-09T15:33:46,266 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/.tmp/info/f238907fbe274caeaefa237c1f250219
2024-12-09T15:33:46,313 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ea79454c5caa6299cd457a756da08be7/recovered.edits/87.seqid, newMaxSeqId=87, maxSeqId=1
2024-12-09T15:33:46,320 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.
2024-12-09T15:33:46,320 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ea79454c5caa6299cd457a756da08be7: Waiting for close lock at 1733758425915Running coprocessor pre-close hooks at 1733758425915Disabling compacts and flushes for region at 1733758425915Disabling writes for close at 1733758425948 (+33 ms)Obtaining lock to block concurrent updates at 1733758425948Preparing flush snapshotting stores in ea79454c5caa6299cd457a756da08be7 at 1733758425948Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7., syncing WAL and waiting on mvcc, flushsize=dataSize=1075, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733758425948Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7. at 1733758425950 (+2 ms)Flushing ea79454c5caa6299cd457a756da08be7/info: creating writer at 1733758425951 (+1 ms)Flushing ea79454c5caa6299cd457a756da08be7/info: appending metadata at 1733758425979 (+28 ms)Flushing ea79454c5caa6299cd457a756da08be7/info: closing flushed file at 1733758425980 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41600a4b: reopening flushed file at 1733758426125 (+145 ms)Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ea79454c5caa6299cd457a756da08be7 in 197ms, sequenceid=84, compaction requested=false at 1733758426146 (+21 ms)Writing region close event to WAL at 1733758426263 (+117 ms)Running coprocessor post-close hooks at 1733758426320 (+57 ms)Closed at 1733758426320
2024-12-09T15:33:46,320 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733758388717.ea79454c5caa6299cd457a756da08be7.
2024-12-09T15:33:46,354 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:33:46,390 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/.tmp/ns/8d72acc7430d482fab561261249a4b6c is 43, key is default/ns:d/1733758388353/Put/seqid=0
2024-12-09T15:33:46,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741910_1096 (size=5153)
2024-12-09T15:33:46,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741910_1096 (size=5153)
2024-12-09T15:33:46,460 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/.tmp/ns/8d72acc7430d482fab561261249a4b6c
2024-12-09T15:33:46,509 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/.tmp/table/8702a2bcf11e4ddba3847ea7c0bce317 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733758389157/Put/seqid=0
2024-12-09T15:33:46,555 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:33:46,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741911_1097 (size=5424)
2024-12-09T15:33:46,602 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/.tmp/table/8702a2bcf11e4ddba3847ea7c0bce317
2024-12-09T15:33:46,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741911_1097 (size=5424)
2024-12-09T15:33:46,631 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/.tmp/info/f238907fbe274caeaefa237c1f250219 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/info/f238907fbe274caeaefa237c1f250219
2024-12-09T15:33:46,665 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/info/f238907fbe274caeaefa237c1f250219, entries=10, sequenceid=11, filesize=6.9 K
2024-12-09T15:33:46,667 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/.tmp/ns/8d72acc7430d482fab561261249a4b6c as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/ns/8d72acc7430d482fab561261249a4b6c
2024-12-09T15:33:46,674 INFO [regionserver/3651d8ead0f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:33:46,680 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T15:33:46,680 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T15:33:46,688 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/ns/8d72acc7430d482fab561261249a4b6c, entries=2, sequenceid=11, filesize=5.0 K
2024-12-09T15:33:46,697 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/.tmp/table/8702a2bcf11e4ddba3847ea7c0bce317 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/table/8702a2bcf11e4ddba3847ea7c0bce317
2024-12-09T15:33:46,712 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T15:33:46,712 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T15:33:46,719 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/table/8702a2bcf11e4ddba3847ea7c0bce317, entries=2, sequenceid=11, filesize=5.3 K
2024-12-09T15:33:46,721 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 769ms, sequenceid=11, compaction requested=false
2024-12-09T15:33:46,755 DEBUG [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:33:46,770 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-09T15:33:46,772 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:33:46,772 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T15:33:46,772 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758425951Running coprocessor pre-close hooks at 1733758425951Disabling compacts and flushes for region at 1733758425951Disabling writes for close at 1733758425952 (+1 ms)Obtaining lock to block concurrent updates at 1733758425952Preparing flush snapshotting stores in 1588230740 at 1733758425952Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733758425953 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733758426157 (+204 ms)Flushing 1588230740/info: creating writer at 1733758426157Flushing 1588230740/info: appending metadata at 1733758426201 (+44 ms)Flushing 1588230740/info: closing flushed file at 1733758426201Flushing 1588230740/ns: creating writer at 1733758426311 (+110 ms)Flushing 1588230740/ns: appending metadata at 1733758426390 (+79 ms)Flushing 1588230740/ns: closing flushed file at 1733758426390Flushing 1588230740/table: creating writer at 1733758426472 (+82 ms)Flushing 1588230740/table: appending metadata at 1733758426508 (+36 ms)Flushing 1588230740/table: closing flushed file at 1733758426508Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d21803: reopening flushed file at 1733758426623 (+115 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b537472: reopening flushed file at 1733758426666 (+43 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c486f1f: reopening flushed file at 1733758426688 (+22 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 769ms, sequenceid=11, compaction requested=false at 1733758426721 (+33 ms)Writing region close event to WAL at 1733758426744 (+23 ms)Running coprocessor post-close hooks at 1733758426771 (+27 ms)Closed at 1733758426772 (+1 ms)
2024-12-09T15:33:46,772 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T15:33:46,962 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(976): stopping server 3651d8ead0f3,34623,1733758386874; all regions closed.
2024-12-09T15:33:46,970 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,974 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,975 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,978 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,979 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:46,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741908_1093 (size=825)
2024-12-09T15:33:46,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741908_1093 (size=825)
2024-12-09T15:33:47,650 INFO [regionserver/3651d8ead0f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:33:48,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741835_1011 (size=393)
2024-12-09T15:33:48,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741831_1007 (size=1321)
2024-12-09T15:33:48,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741875_1058 (size=12911)
2024-12-09T15:33:48,423 INFO [master/3651d8ead0f3:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-09T15:33:48,423 INFO [master/3651d8ead0f3:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-09T15:33:49,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741829_1005 (size=34)
2024-12-09T15:33:49,930 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 after 4010ms
2024-12-09T15:33:50,142 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta after 4015ms
2024-12-09T15:33:50,923 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-09T15:33:50,935 DEBUG [RS:1;3651d8ead0f3:45133 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs
2024-12-09T15:33:50,935 INFO [RS:1;3651d8ead0f3:45133 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C45133%2C1733758388518:(num 1733758388815)
2024-12-09T15:33:50,935 DEBUG [RS:1;3651d8ead0f3:45133 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:33:50,935 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:33:50,935 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:33:50,936 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.ChoreService(370): Chore service for: regionserver/3651d8ead0f3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T15:33:50,936 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T15:33:50,936 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T15:33:50,936 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T15:33:50,936 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:33:50,936 INFO [RS:1;3651d8ead0f3:45133 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45133
2024-12-09T15:33:50,937 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T15:33:50,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45133-0x1012bd1124b0002, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3651d8ead0f3,45133,1733758388518
2024-12-09T15:33:50,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T15:33:50,939 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:33:50,939 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3651d8ead0f3,45133,1733758388518]
2024-12-09T15:33:50,941 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3651d8ead0f3,45133,1733758388518 already deleted, retry=false
2024-12-09T15:33:50,941 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3651d8ead0f3,45133,1733758388518 expired; onlineServers=1
2024-12-09T15:33:51,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45133-0x1012bd1124b0002, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:33:51,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45133-0x1012bd1124b0002, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:33:51,041 INFO [RS:1;3651d8ead0f3:45133 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:33:51,041 INFO [RS:1;3651d8ead0f3:45133 {}] regionserver.HRegionServer(1031): Exiting; stopping=3651d8ead0f3,45133,1733758388518; zookeeper connection closed.
2024-12-09T15:33:51,042 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69ead637 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69ead637
2024-12-09T15:33:51,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741825_1001 (size=7)
2024-12-09T15:33:51,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741827_1003 (size=196)
2024-12-09T15:33:51,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,369 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,369 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,370 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,914 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T15:33:51,935 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:33:51,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
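The repeated FsDatasetImpl warnings are a shutdown race: the HBase-Metrics2-1 thread dereferences the field behind "this.executors" after teardown has already nulled it. A sketch of that race and of a defensive read-once guard follows; it is illustrative only, not the actual FsDatasetImpl code or the Hadoop fix.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutorService;

    class MetricsShutdownRaceSketch {
        // Nulled on shutdown; the metrics thread races this write.
        private volatile Map<String, ExecutorService> executors = new ConcurrentHashMap<>();

        int executorCount() {
            Map<String, ExecutorService> snapshot = executors; // single volatile read
            if (snapshot == null) {
                return 0; // already shut down; skip the metric instead of throwing
            }
            return snapshot.values().size();
        }

        void shutdown() {
            executors = null; // what the metrics thread observes mid-collection
        }
    }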
2024-12-09T15:33:51,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:33:51,970 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-12-09T15:33:51,970 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:33:51,970 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T15:33:51,981 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-09T15:33:51,986 DEBUG [RS:0;3651d8ead0f3:34623 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs
2024-12-09T15:33:51,986 INFO [RS:0;3651d8ead0f3:34623 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C34623%2C1733758386874.meta:.meta(num 1733758425955)
2024-12-09T15:33:51,987 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:51,987 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:51,987 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:51,987 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:51,988 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:33:51,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741891_1075 (size=18156)
2024-12-09T15:33:51,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741891_1075 (size=18156)
2024-12-09T15:33:52,152 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null
    at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?]
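The ERROR from AbstractFSWAL names the knob for the five-second writer-close wait directly: "hbase.wal.fshlog.wait.on.shutdown.seconds". A minimal sketch of raising it through the standard Hadoop Configuration API follows; the key is taken verbatim from the log message, while the value of 30 is an arbitrary example and 5 mirrors the default the message implies.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Give the async WAL writer more time to close on shutdown.
            conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
            System.out.println(
                conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
        }
    }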
2024-12-09T15:33:52,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:33:52,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741832_1008 (size=32)
2024-12-09T15:33:52,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741836_1012 (size=76)
2024-12-09T15:33:52,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
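Every one of these traces opens with "java.lang.reflect.InvocationTargetException: null" because, as the frames show, RecoverLeaseFSUtils invokes isFileClosed through java.lang.reflect.Method; the real failure ("Filesystem closed" or "File does not exist") only appears in the Caused by chain. A self-contained sketch of that wrap-and-unwrap pattern follows; the target class here is a stand-in, not the HDFS client.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveCauseSketch {
        // Stand-in for the reflectively invoked filesystem method.
        public static class FakeFs {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            Method m = FakeFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new FakeFs(), "/some/wal");
            } catch (InvocationTargetException e) {
                // The wrapper carries no message of its own, hence the
                // ": null" in the log; the cause is the useful part.
                System.out.println("wrapped message: " + e.getMessage());
                System.out.println("real cause: " + e.getCause());
            }
        }
    }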
2024-12-09T15:33:53,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:33:53,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
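The same "Failed invocation" warning recurs on a roughly one-second cadence (15:33:52,936, then 53,938, 54,939, 55,942): the lease-recovery path keeps probing isFileClosed against a DFSClient that is already closed, so every attempt fails identically. The sketch below shows one way a bounded probe loop could bail out early on a cause that can never heal; this is an illustrative policy, not HBase's actual RecoverLeaseFSUtils logic.

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    public class LeaseProbeSketch {
        interface Probe {
            boolean isFileClosed() throws IOException;
        }

        // Retries on the one-second cadence visible in the timestamps above,
        // but gives up immediately when the client itself is gone.
        static boolean probeWithRetries(Probe probe, int attempts)
                throws IOException, InterruptedException {
            for (int i = 0; i < attempts; i++) {
                try {
                    if (probe.isFileClosed()) {
                        return true;
                    }
                } catch (IOException e) {
                    if ("Filesystem closed".equals(e.getMessage())) {
                        throw e; // non-retriable: the DFSClient is shut down
                    }
                    // otherwise treat as transient and retry after the pause
                }
                TimeUnit.SECONDS.sleep(1);
            }
            return false;
        }

        public static void main(String[] args) throws InterruptedException {
            try {
                probeWithRetries(() -> { throw new IOException("Filesystem closed"); }, 5);
            } catch (IOException e) {
                System.out.println("gave up immediately: " + e.getMessage());
            }
        }
    }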
2024-12-09T15:33:54,152 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null
    at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?]
2024-12-09T15:33:54,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:33:54,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741826_1002 (size=42)
2024-12-09T15:33:54,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741828_1004 (size=1189)
2024-12-09T15:33:54,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:33:55,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:33:55,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:33:56,153 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null
    at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?]
2024-12-09T15:33:56,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:33:56,310 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2277c405 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1105912312-172.17.0.3-1733758385755:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:39255,null,null]) java.net.ConnectException: Call From 3651d8ead0f3/172.17.0.3 to localhost:39353 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T15:33:56,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741837_1085 (size=85) 2024-12-09T15:33:56,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:33:56,988 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T15:33:56,993 DEBUG [RS:0;3651d8ead0f3:34623 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/oldWALs 2024-12-09T15:33:56,993 INFO [RS:0;3651d8ead0f3:34623 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C34623%2C1733758386874:(num 1733758425124) 2024-12-09T15:33:56,993 DEBUG [RS:0;3651d8ead0f3:34623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T15:33:56,993 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T15:33:56,993 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T15:33:56,993 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.ChoreService(370): Chore service for: regionserver/3651d8ead0f3:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T15:33:56,993 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T15:33:56,993 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T15:33:56,993 INFO [RS:0;3651d8ead0f3:34623 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34623 2024-12-09T15:33:57,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T15:33:57,037 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T15:33:57,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3651d8ead0f3,34623,1733758386874 2024-12-09T15:33:57,101 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3651d8ead0f3,34623,1733758386874] 2024-12-09T15:33:57,145 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3651d8ead0f3,34623,1733758386874 already deleted, retry=false 2024-12-09T15:33:57,145 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3651d8ead0f3,34623,1733758386874 expired; onlineServers=0 2024-12-09T15:33:57,145 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3651d8ead0f3,43929,1733758386761' ***** 2024-12-09T15:33:57,145 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T15:33:57,145 INFO [M:0;3651d8ead0f3:43929 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T15:33:57,145 INFO [M:0;3651d8ead0f3:43929 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T15:33:57,146 DEBUG [M:0;3651d8ead0f3:43929 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T15:33:57,146 DEBUG [M:0;3651d8ead0f3:43929 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T15:33:57,146 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T15:33:57,146 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758387534 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758387534,5,FailOnTimeoutGroup] 2024-12-09T15:33:57,146 INFO [M:0;3651d8ead0f3:43929 {}] hbase.ChoreService(370): Chore service for: master/3651d8ead0f3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T15:33:57,146 INFO [M:0;3651d8ead0f3:43929 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T15:33:57,146 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758387539 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758387539,5,FailOnTimeoutGroup] 2024-12-09T15:33:57,146 DEBUG [M:0;3651d8ead0f3:43929 {}] master.HMaster(1795): Stopping service threads 2024-12-09T15:33:57,146 INFO [M:0;3651d8ead0f3:43929 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T15:33:57,146 INFO [M:0;3651d8ead0f3:43929 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T15:33:57,147 INFO [M:0;3651d8ead0f3:43929 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T15:33:57,147 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T15:33:57,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:33:57,202 INFO [RS:0;3651d8ead0f3:34623 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T15:33:57,202 INFO [RS:0;3651d8ead0f3:34623 {}] regionserver.HRegionServer(1031): Exiting; stopping=3651d8ead0f3,34623,1733758386874; zookeeper connection closed. 
2024-12-09T15:33:57,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T15:33:57,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34623-0x1012bd1124b0001, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T15:33:57,203 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@243adebd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@243adebd 2024-12-09T15:33:57,204 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-09T15:33:57,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T15:33:57,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:33:57,207 DEBUG [M:0;3651d8ead0f3:43929 {}] zookeeper.ZKUtil(347): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T15:33:57,207 WARN [M:0;3651d8ead0f3:43929 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T15:33:57,208 INFO [M:0;3651d8ead0f3:43929 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/.lastflushedseqids 2024-12-09T15:33:57,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741912_1098 (size=130) 2024-12-09T15:33:57,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741912_1098 (size=130) 2024-12-09T15:33:57,234 INFO [M:0;3651d8ead0f3:43929 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T15:33:57,235 INFO [M:0;3651d8ead0f3:43929 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T15:33:57,236 DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T15:33:57,236 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:57,236 DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:57,236 DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T15:33:57,236 DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T15:33:57,236 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-12-09T15:33:57,276 DEBUG [M:0;3651d8ead0f3:43929 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4ce3fb0be6ff48ada2a7875084a7c968 is 82, key is hbase:meta,,1/info:regioninfo/1733758388327/Put/seqid=0 2024-12-09T15:33:57,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741837_1085 (size=85) 2024-12-09T15:33:57,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741913_1099 (size=5672) 2024-12-09T15:33:57,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741913_1099 (size=5672) 2024-12-09T15:33:57,318 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4ce3fb0be6ff48ada2a7875084a7c968 2024-12-09T15:33:57,371 DEBUG [M:0;3651d8ead0f3:43929 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e3e2dabde13453aa87a7c556402f728 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733758389167/Put/seqid=0 2024-12-09T15:33:57,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741914_1100 (size=6256) 2024-12-09T15:33:57,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741914_1100 (size=6256) 2024-12-09T15:33:57,427 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e3e2dabde13453aa87a7c556402f728 2024-12-09T15:33:57,438 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5e3e2dabde13453aa87a7c556402f728 2024-12-09T15:33:57,461 DEBUG [M:0;3651d8ead0f3:43929 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c79953c5003940a9b3256346d3e46c1e is 69, key is 3651d8ead0f3,34623,1733758386874/rs:state/1733758387594/Put/seqid=0 2024-12-09T15:33:57,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741915_1101 (size=5224) 2024-12-09T15:33:57,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741915_1101 (size=5224) 2024-12-09T15:33:57,473 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T15:33:57,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:57,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:57,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:57,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:57,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:57,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:57,499 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:57,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:33:57,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:33:57,872 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c79953c5003940a9b3256346d3e46c1e 2024-12-09T15:33:57,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:33:57,955 DEBUG [M:0;3651d8ead0f3:43929 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/02c1af5f6fa34b4fa8d3aa1c6ba905ee is 52, key is load_balancer_on/state:d/1733758388495/Put/seqid=0 2024-12-09T15:33:57,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741916_1102 (size=5056) 2024-12-09T15:33:57,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741916_1102 (size=5056) 2024-12-09T15:33:58,010 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/02c1af5f6fa34b4fa8d3aa1c6ba905ee 2024-12-09T15:33:58,040 DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4ce3fb0be6ff48ada2a7875084a7c968 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4ce3fb0be6ff48ada2a7875084a7c968 2024-12-09T15:33:58,047 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4ce3fb0be6ff48ada2a7875084a7c968, entries=8, sequenceid=60, filesize=5.5 K 2024-12-09T15:33:58,050 DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e3e2dabde13453aa87a7c556402f728 as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e3e2dabde13453aa87a7c556402f728 2024-12-09T15:33:58,062 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5e3e2dabde13453aa87a7c556402f728 2024-12-09T15:33:58,062 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e3e2dabde13453aa87a7c556402f728, entries=6, sequenceid=60, filesize=6.1 K 2024-12-09T15:33:58,070 DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c79953c5003940a9b3256346d3e46c1e as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c79953c5003940a9b3256346d3e46c1e 2024-12-09T15:33:58,080 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c79953c5003940a9b3256346d3e46c1e, entries=2, sequenceid=60, filesize=5.1 K
2024-12-09T15:33:58,081 DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/02c1af5f6fa34b4fa8d3aa1c6ba905ee as hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/02c1af5f6fa34b4fa8d3aa1c6ba905ee 2024-12-09T15:33:58,092 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/02c1af5f6fa34b4fa8d3aa1c6ba905ee, entries=1, sequenceid=60, filesize=4.9 K 2024-12-09T15:33:58,095 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 859ms, sequenceid=60, compaction requested=false 2024-12-09T15:33:58,123 INFO [M:0;3651d8ead0f3:43929 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:33:58,124 DEBUG [M:0;3651d8ead0f3:43929 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758437235Disabling compacts and flushes for region at 1733758437235Disabling writes for close at 1733758437236 (+1 ms)Obtaining lock to block concurrent updates at 1733758437236Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733758437236Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1733758437237 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733758437239 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733758437239Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733758437276 (+37 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733758437276Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733758437340 (+64 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733758437367 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733758437367Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733758437438 (+71 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733758437460 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733758437460Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733758437880 (+420 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733758437954 (+74 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733758437954Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@772ff536: reopening flushed file at 1733758438039 (+85 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41ad1c10: reopening flushed file at 1733758438048 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11c3313c: reopening flushed file at 1733758438063 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cf8a08: reopening flushed file at 1733758438080 (+17 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 859ms, sequenceid=60, compaction requested=false at 1733758438095 (+15 ms)Writing region close event to WAL at 1733758438123 (+28 ms)Closed at 1733758438123
2024-12-09T15:33:58,131 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:58,132 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:58,132 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:58,132 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:58,132 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:33:58,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32937 is added to blk_1073741887_1070 (size=1045) 2024-12-09T15:33:58,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36667 is added to blk_1073741887_1070 (size=1045) 2024-12-09T15:33:58,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:33:58,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
[The identical InvocationTargetException / "Filesystem closed" stack trace accompanied each of the retry warnings below; the duplicate traces are omitted.]
2024-12-09T15:33:58,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:33:59,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:33:59,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:33:59,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:34:00,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:34:00,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:34:00,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:34:01,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:34:01,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-09T15:34:01,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:34:01,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:34:02,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:34:02,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:34:02,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:34:03,133 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-09T15:34:03,133 INFO [M:0;3651d8ead0f3:43929 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T15:34:03,133 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
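[Editorial sketch, not part of the captured log. The shutdown ERROR above names a tunable, "hbase.wal.fshlog.wait.on.shutdown.seconds"; this run waited the 5 seconds reported in the message. A minimal, hedged Java sketch of raising that key via the standard Hadoop/HBase Configuration API follows; the class name and the 30-second value are illustrative assumptions, not advice from the log.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The ERROR record shows this run gave the async WAL writer 5 seconds to
    // close; setting the key before cluster startup widens that bound.
    // 30 is an arbitrary illustrative value.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    System.out.println(conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
  }
}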
2024-12-09T15:34:03,134 INFO [M:0;3651d8ead0f3:43929 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43929
2024-12-09T15:34:03,134 INFO [M:0;3651d8ead0f3:43929 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:34:03,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
[identical InvocationTargetException / "Filesystem closed" stack trace omitted; see the 15:33:58,742 occurrence above]
2024-12-09T15:34:03,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:34:03,330 INFO [M:0;3651d8ead0f3:43929 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:34:03,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43929-0x1012bd1124b0000, quorum=127.0.0.1:60306, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:34:03,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39eaf0e6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:34:03,383 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1542e930{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:34:03,383 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:34:03,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7177a9b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:34:03,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61815e22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,STOPPED}
2024-12-09T15:34:03,387 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:34:03,387 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:34:03,386 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2277c405 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1105912312-172.17.0.3-1733758385755:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39255,null,null])
java.io.InterruptedIOException: DestHost:destPort localhost:39353 , LocalHost:localPort 3651d8ead0f3/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS)
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
	at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
	at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?]
	at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS)
	at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?]
	... 12 more
Caused by: java.lang.InterruptedException: sleep interrupted
	at java.lang.Thread.sleep(Native Method) ~[?:?]
	at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?]
	... 12 more
2024-12-09T15:34:03,388 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1105912312-172.17.0.3-1733758385755 (Datanode Uuid 5682c3b3-0aeb-4fcb-94ae-68e4c762f6bd) service to localhost/127.0.0.1:38871
2024-12-09T15:34:03,389 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data3/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:03,390 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data4/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:03,390 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:34:03,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b4117c9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:34:03,404 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ccc1bc4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:34:03,405 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:34:03,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ca82099{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:34:03,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@156f3a55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,STOPPED}
2024-12-09T15:34:03,412 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:34:03,412 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
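[Editorial sketch, not part of the captured log. The BlockRecoveryWorker failure above embeds the IPC client's retry policy, RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS). A minimal Java sketch of the same policy built with Hadoop's public retry API follows, assuming hadoop-common on the classpath; the class name and the sample exception are illustrative assumptions.]

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class RetryPolicySketch {
  public static void main(String[] args) throws Exception {
    // Same shape as the policy named in the log: up to 10 retries, 1000 ms apart.
    RetryPolicy policy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1000, TimeUnit.MILLISECONDS);
    // shouldRetry yields the RetryAction(action=RETRY, delayMillis=1000, ...)
    // values that appear verbatim inside the InterruptedIOException message.
    RetryPolicy.RetryAction action =
        policy.shouldRetry(new IOException("connect failed"), 0, 0, true);
    System.out.println(action.action + ", delayMillis=" + action.delayMillis);
  }
}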
2024-12-09T15:34:03,412 WARN [BP-1105912312-172.17.0.3-1733758385755 heartbeating to localhost/127.0.0.1:38871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1105912312-172.17.0.3-1733758385755 (Datanode Uuid 046bf575-8dc6-4a60-8c58-4e0bc9aca94c) service to localhost/127.0.0.1:38871
2024-12-09T15:34:03,412 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:34:03,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data7/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:03,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/cluster_c242ba6a-a11d-1509-971b-bc8b2e7485d9/data/data8/current/BP-1105912312-172.17.0.3-1733758385755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:03,414 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:34:03,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72f8e821{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T15:34:03,428 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e26ba04{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:34:03,428 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:34:03,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14d09ab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:34:03,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6de86657{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir/,STOPPED}
2024-12-09T15:34:03,459 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T15:34:03,520 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T15:34:03,546 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=153 (was 79)

Potentially hanging thread: HMaster-EventLoopGroup-5-1
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-16-1
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-5-2
	[stack identical to HMaster-EventLoopGroup-5-1 above; omitted]

Potentially hanging thread: nioEventLoopGroup-20-2
	[stack identical to nioEventLoopGroup-16-1 above; omitted]

Potentially hanging thread: nioEventLoopGroup-21-2
	[stack identical to nioEventLoopGroup-16-1 above; omitted]

Potentially hanging thread: nioEventLoopGroup-20-3
	[stack identical to nioEventLoopGroup-16-1 above; omitted]

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9
	[stack identical to RPCClient-NioEventLoopGroup-4-8 above; omitted]

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1
	[stack identical to HMaster-EventLoopGroup-5-1 above; omitted]

Potentially hanging thread: HMaster-EventLoopGroup-7-2
	[stack identical to HMaster-EventLoopGroup-5-1 above; omitted]

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38871
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6
	[stack identical to RPCClient-NioEventLoopGroup-4-8 above; omitted]

Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:38871
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
	app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
	app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38871 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1
	[stack identical to HMaster-EventLoopGroup-5-1 above; omitted]

Potentially hanging thread: nioEventLoopGroup-17-3
	[stack identical to nioEventLoopGroup-16-1 above; omitted]

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7
	[stack identical to RPCClient-NioEventLoopGroup-4-8 above; omitted]

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11
	[stack identical to RPCClient-NioEventLoopGroup-4-8 above; omitted]

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1
	[stack identical to HMaster-EventLoopGroup-5-1 above; omitted]

Potentially hanging thread: nioEventLoopGroup-20-1
	[stack identical to nioEventLoopGroup-16-1 above; omitted]

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2
	[stack identical to HMaster-EventLoopGroup-5-1 above; omitted]

Potentially hanging thread: nioEventLoopGroup-17-2
	[stack identical to nioEventLoopGroup-16-1 above; omitted]

Potentially hanging thread: Close-WAL-Writer-0
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175)
	app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
	app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
	app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
	app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$890/0x00007f79f4bef840.run(Unknown Source)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:38871
	[stack identical to LeaseRenewer:jenkins.hfs.3@localhost:38871 above; omitted]

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38871 from jenkins.hfs.2
	[stack identical to the IPC Client connection from jenkins above; omitted]

Potentially hanging thread: HMaster-EventLoopGroup-7-1
	[stack identical to HMaster-EventLoopGroup-5-1 above; omitted]

Potentially hanging thread: nioEventLoopGroup-16-3
	[stack identical to nioEventLoopGroup-16-1 above; omitted]

Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:38871
	[stack identical to LeaseRenewer:jenkins.hfs.3@localhost:38871 above; omitted]

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38871
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33363 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$890/0x00007f79f4bef840.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:33363 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$890/0x00007f79f4bef840.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$890/0x00007f79f4bef840.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=429 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=749 (was 561) - SystemLoadAverage LEAK? 
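The LEAK? markers above come from comparing per-test resource snapshots: counts of live threads, open file descriptors and so on are recorded before each test and diffed afterwards, and anything that grew is flagged. As a rough, hypothetical sketch of that bookkeeping (HBase's actual ResourceChecker tracks more counters and uses its own names; this is not its implementation), a before/after snapshot in plain Java might look like:

    import java.lang.management.ManagementFactory;
    import com.sun.management.UnixOperatingSystemMXBean;

    final class ResourceSnapshot {
        final int threads;
        final long openFds;

        private ResourceSnapshot(int threads, long openFds) {
            this.threads = threads;
            this.openFds = openFds;
        }

        // Capture current thread and file-descriptor counts from JMX.
        static ResourceSnapshot take() {
            int threads = ManagementFactory.getThreadMXBean().getThreadCount();
            long fds = -1;
            Object os = ManagementFactory.getOperatingSystemMXBean();
            if (os instanceof UnixOperatingSystemMXBean) {
                fds = ((UnixOperatingSystemMXBean) os).getOpenFileDescriptorCount();
            }
            return new ResourceSnapshot(threads, fds);
        }

        // Print a "now (was before)" line in the spirit of the LEAK? summary above.
        void diff(ResourceSnapshot before) {
            System.out.printf("Thread=%d (was %d)%s, OpenFileDescriptor=%d (was %d)%n",
                threads, before.threads,
                threads > before.threads ? " - Thread LEAK?" : "",
                openFds, before.openFds);
        }
    }

A harness would call take() in a before-hook, again in an after-hook, and then diff the two, which is the shape of the "Thread=... (was ...)" output printed here.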
2024-12-09T15:34:03,561 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=153, OpenFileDescriptor=429, MaxFileDescriptor=1048576, SystemLoadAverage=749, ProcessCount=11, AvailableMemoryMB=5986
2024-12-09T15:34:03,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T15:34:03,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.log.dir so I do NOT create it in target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb
2024-12-09T15:34:03,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28dc90cd-66f2-5fee-67cd-a76d7fc7e286/hadoop.tmp.dir so I do NOT create it in target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb
2024-12-09T15:34:03,562 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3, deleteOnExit=true
2024-12-09T15:34:03,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T15:34:03,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/test.cache.data in system properties and HBase conf
2024-12-09T15:34:03,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T15:34:03,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir in system properties and HBase conf
2024-12-09T15:34:03,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T15:34:03,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T15:34:03,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T15:34:03,564 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
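The StartMiniClusterOption{...} line above fixes the cluster shape for this test: one master, one region server, two datanodes and one ZooKeeper server, with no pre-created root or WAL directory. A sketch of building the same shape through the builder implied by that toString() output follows; the builder method names are assumptions based on the HBase 2.x StartMiniClusterOption API and should be verified against this branch:

    import org.apache.hadoop.hbase.StartMiniClusterOption;

    class MiniClusterShape {
        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1,
        // numDataNodes=2, numZkServers=1, createRootDir=false, createWALDir=false}
        static StartMiniClusterOption testLogRollingShape() {
            return StartMiniClusterOption.builder()
                .numMasters(1)
                .numRegionServers(1)
                .numDataNodes(2)
                .numZkServers(1)
                .createRootDir(false)
                .createWALDir(false)
                .build();
        }
    }

HBaseTestingUtil then starts the minicluster from such an option, which is what kicks off the "STARTING DFS" sequence and the property rewiring recorded below.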
Skipping on block location reordering 2024-12-09T15:34:03,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T15:34:03,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T15:34:03,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T15:34:03,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T15:34:03,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T15:34:03,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T15:34:03,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T15:34:03,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T15:34:03,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T15:34:03,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/nfs.dump.dir in system properties and HBase conf 2024-12-09T15:34:03,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/java.io.tmpdir in system properties and HBase conf 2024-12-09T15:34:03,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T15:34:03,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T15:34:03,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T15:34:03,595 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T15:34:03,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:03,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:03,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:04,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:04,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:04,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:04,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:05,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:05,242 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:34:05,248 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:34:05,254 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:34:05,254 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:34:05,254 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T15:34:05,255 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:34:05,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25a29a07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:34:05,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7096145a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:34:05,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49ef22be{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/java.io.tmpdir/jetty-localhost-33797-hadoop-hdfs-3_4_1-tests_jar-_-any-4417144987665896489/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T15:34:05,556 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b230242{HTTP/1.1, (http/1.1)}{localhost:33797} 2024-12-09T15:34:05,556 INFO [Time-limited test {}] server.Server(415): Started @171207ms 2024-12-09T15:34:05,571 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T15:34:05,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:05,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:05,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:06,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:06,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:06,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:06,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:06,974 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T15:34:06,987 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:06,988 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:06,988 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:06,988 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:06,989 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:06,989 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:06,993 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:06,993 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:06,993 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:06,997 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:07,011 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:34:07,023 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:34:07,027 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:34:07,027 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:34:07,027 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:34:07,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fc8bed8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:34:07,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13646a74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:34:07,188 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fb42f9a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/java.io.tmpdir/jetty-localhost-34311-hadoop-hdfs-3_4_1-tests_jar-_-any-4860656324550251431/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:34:07,189 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d74671d{HTTP/1.1, (http/1.1)}{localhost:34311}
2024-12-09T15:34:07,190 INFO [Time-limited test {}] server.Server(415): Started @172841ms
2024-12-09T15:34:07,191 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T15:34:07,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:07,274 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:34:07,282 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:34:07,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:34:07,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:34:07,290 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T15:34:07,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64434c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:34:07,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47946b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:34:07,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@37687418{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/java.io.tmpdir/jetty-localhost-40653-hadoop-hdfs-3_4_1-tests_jar-_-any-1315225266349603954/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:34:07,583 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e7f214b{HTTP/1.1, (http/1.1)}{localhost:40653}
2024-12-09T15:34:07,584 INFO [Time-limited test {}] server.Server(415): Started @173235ms
2024-12-09T15:34:07,585 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T15:34:07,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:07,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:07,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:08,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:08,448 WARN [Thread-1222 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data1/current/BP-871040900-172.17.0.3-1733758443620/current, will proceed with Du for space computation calculation,
2024-12-09T15:34:08,451 WARN [Thread-1223 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data2/current/BP-871040900-172.17.0.3-1733758443620/current, will proceed with Du for space computation calculation,
2024-12-09T15:34:08,488 WARN [Thread-1228 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data3/current/BP-871040900-172.17.0.3-1733758443620/current, will proceed with Du for space computation calculation,
2024-12-09T15:34:08,503 WARN [Thread-1229 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data4/current/BP-871040900-172.17.0.3-1733758443620/current, will proceed with Du for space computation calculation,
2024-12-09T15:34:08,603 WARN [Thread-1186 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T15:34:08,628 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6457543715a8014b with lease ID 0xfe62ee5b35e6e230: Processing first storage report for DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30 from datanode DatanodeRegistration(127.0.0.1:36237, datanodeUuid=fdd28ed2-4e85-4af5-82a0-58145929e994, infoPort=32867, infoSecurePort=0, ipcPort=33123, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620)
2024-12-09T15:34:08,628 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6457543715a8014b with lease ID 0xfe62ee5b35e6e230: from storage DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30 node DatanodeRegistration(127.0.0.1:36237, datanodeUuid=fdd28ed2-4e85-4af5-82a0-58145929e994, infoPort=32867, infoSecurePort=0, ipcPort=33123, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T15:34:08,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6457543715a8014b with lease ID 0xfe62ee5b35e6e230: Processing first storage report for DS-35b94267-44e8-4623-8338-6ee2b9131c03 from datanode DatanodeRegistration(127.0.0.1:36237, datanodeUuid=fdd28ed2-4e85-4af5-82a0-58145929e994, infoPort=32867, infoSecurePort=0, ipcPort=33123, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620)
2024-12-09T15:34:08,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6457543715a8014b with lease ID 0xfe62ee5b35e6e230: from storage DS-35b94267-44e8-4623-8338-6ee2b9131c03 node DatanodeRegistration(127.0.0.1:36237, datanodeUuid=fdd28ed2-4e85-4af5-82a0-58145929e994, infoPort=32867, infoSecurePort=0, ipcPort=33123, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:34:08,646 WARN [Thread-1209 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T15:34:08,658 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14235ac297d24787 with lease ID 0xfe62ee5b35e6e231: Processing first storage report for DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4 from datanode DatanodeRegistration(127.0.0.1:38483, datanodeUuid=c9b088f4-8ae4-4179-83dc-8e29ab739952, infoPort=40151, infoSecurePort=0, ipcPort=45913, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620)
2024-12-09T15:34:08,658 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14235ac297d24787 with lease ID 0xfe62ee5b35e6e231: from storage DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4 node DatanodeRegistration(127.0.0.1:38483, datanodeUuid=c9b088f4-8ae4-4179-83dc-8e29ab739952, infoPort=40151, infoSecurePort=0, ipcPort=45913, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:34:08,662 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14235ac297d24787 with lease ID 0xfe62ee5b35e6e231: Processing first storage report for DS-9d31ca8d-2ec6-4866-975c-10c55c32c3b1 from datanode DatanodeRegistration(127.0.0.1:38483, datanodeUuid=c9b088f4-8ae4-4179-83dc-8e29ab739952, infoPort=40151, infoSecurePort=0, ipcPort=45913, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620)
2024-12-09T15:34:08,663 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14235ac297d24787 with lease ID 0xfe62ee5b35e6e231: from storage DS-9d31ca8d-2ec6-4866-975c-10c55c32c3b1 node DatanodeRegistration(127.0.0.1:38483, datanodeUuid=c9b088f4-8ae4-4179-83dc-8e29ab739952, infoPort=40151, infoSecurePort=0, ipcPort=45913, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T15:34:08,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:08,709 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb
2024-12-09T15:34:08,745 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/zookeeper_0, clientPort=55405, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T15:34:08,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:08,752 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55405
2024-12-09T15:34:08,752 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:08,755 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:08,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741825_1001 (size=7)
2024-12-09T15:34:08,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38483 is added to blk_1073741825_1001 (size=7)
2024-12-09T15:34:08,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:09,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:09,245 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0 with version=8
2024-12-09T15:34:09,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/hbase-staging
2024-12-09T15:34:09,248 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3651d8ead0f3:0 server-side Connection retries=45
2024-12-09T15:34:09,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:34:09,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T15:34:09,249 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T15:34:09,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:34:09,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T15:34:09,249 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T15:34:09,249 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T15:34:09,259 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33343
2024-12-09T15:34:09,261 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33343 connecting to ZooKeeper ensemble=127.0.0.1:55405
2024-12-09T15:34:09,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:333430x0, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T15:34:09,320 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33343-0x1012bd2046e0000 connected
2024-12-09T15:34:09,495 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:09,511 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:09,531 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:34:09,531 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0, hbase.cluster.distributed=false
2024-12-09T15:34:09,555 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T15:34:09,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33343
2024-12-09T15:34:09,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33343
2024-12-09T15:34:09,578 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33343
2024-12-09T15:34:09,609 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33343
2024-12-09T15:34:09,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33343
2024-12-09T15:34:09,659 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3651d8ead0f3:0 server-side Connection retries=45
2024-12-09T15:34:09,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:34:09,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T15:34:09,659 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T15:34:09,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:34:09,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T15:34:09,659 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T15:34:09,659 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T15:34:09,682 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44921
2024-12-09T15:34:09,684 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44921 connecting to ZooKeeper ensemble=127.0.0.1:55405
2024-12-09T15:34:09,685 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:09,688 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:09,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:449210x0, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T15:34:09,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:09,715 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:449210x0, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:34:09,715 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T15:34:09,723 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44921-0x1012bd2046e0001 connected
2024-12-09T15:34:09,738 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T15:34:09,753 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T15:34:09,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:09,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T15:34:09,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44921
2024-12-09T15:34:09,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44921
2024-12-09T15:34:09,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44921
2024-12-09T15:34:09,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44921
2024-12-09T15:34:09,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44921
2024-12-09T15:34:09,897 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3651d8ead0f3:33343
2024-12-09T15:34:09,902 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3651d8ead0f3,33343,1733758449248
2024-12-09T15:34:09,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:34:09,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:34:09,919 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3651d8ead0f3,33343,1733758449248
2024-12-09T15:34:09,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-09T15:34:09,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:09,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:09,930 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-09T15:34:09,931 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3651d8ead0f3,33343,1733758449248 from backup master directory
2024-12-09T15:34:09,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3651d8ead0f3,33343,1733758449248
2024-12-09T15:34:09,933 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-09T15:34:09,933 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3651d8ead0f3,33343,1733758449248
2024-12-09T15:34:09,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:34:09,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:34:09,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:09,987 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/hbase.id] with ID: 65a842b6-7987-4d90-8006-5460414ffd44
2024-12-09T15:34:09,987 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/.tmp/hbase.id
2024-12-09T15:34:10,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38483 is added to blk_1073741826_1002 (size=42)
2024-12-09T15:34:10,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741826_1002 (size=42)
2024-12-09T15:34:10,055 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/.tmp/hbase.id]:[hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/hbase.id]
2024-12-09T15:34:10,183 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:10,183 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-09T15:34:10,192 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 9ms.
2024-12-09T15:34:10,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:10,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:34:10,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:34:10,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741827_1003 (size=196) 2024-12-09T15:34:10,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38483 is added to blk_1073741827_1003 (size=196) 2024-12-09T15:34:10,319 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T15:34:10,320 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T15:34:10,330 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:34:10,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38483 is added to blk_1073741828_1004 (size=1189) 2024-12-09T15:34:10,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added 
to blk_1073741828_1004 (size=1189) 2024-12-09T15:34:10,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:10,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:10,819 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store 2024-12-09T15:34:10,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741829_1005 (size=34) 2024-12-09T15:34:10,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38483 is added to blk_1073741829_1005 (size=34) 2024-12-09T15:34:10,924 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:34:10,924 DEBUG 
[master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T15:34:10,924 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:34:10,924 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:34:10,924 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T15:34:10,924 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:34:10,924 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:34:10,924 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758450924Disabling compacts and flushes for region at 1733758450924Disabling writes for close at 1733758450924Writing region close event to WAL at 1733758450924Closed at 1733758450924 2024-12-09T15:34:10,938 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/.initializing 2024-12-09T15:34:10,938 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248 2024-12-09T15:34:10,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:10,963 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C33343%2C1733758449248, suffix=, logDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248, archiveDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/oldWALs, maxLogs=10
2024-12-09T15:34:10,964 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C33343%2C1733758449248.1733758450964
2024-12-09T15:34:11,038 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758450964
2024-12-09T15:34:11,058 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40151:40151),(127.0.0.1/127.0.0.1:32867:32867)]
2024-12-09T15:34:11,081 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-09T15:34:11,082 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:34:11,082 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,082 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,099 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-09T15:34:11,115 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:11,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:11,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-09T15:34:11,136 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:11,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:34:11,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-09T15:34:11,143 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:11,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:34:11,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,167 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-09T15:34:11,167 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:11,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:34:11,173 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,174 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,174 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,176 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,176 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,177 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-09T15:34:11,178 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:11,197 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T15:34:11,198 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725733, jitterRate=-0.07718278467655182}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-09T15:34:11,200 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733758451082Initializing all the Stores at 1733758451094 (+12 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758451094Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758451099 (+5 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758451099Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758451099Cleaning up temporary data from old regions at 1733758451176 (+77 ms)Region opened successfully at 1733758451199 (+23 ms)
2024-12-09T15:34:11,202 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-09T15:34:11,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:34:11,214 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@efb323d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0
2024-12-09T15:34:11,218 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-12-09T15:34:11,219 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-09T15:34:11,219 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-09T15:34:11,219 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-09T15:34:11,220 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec
2024-12-09T15:34:11,221 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-12-09T15:34:11,221 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-09T15:34:11,237 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-09T15:34:11,242 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-09T15:34:11,246 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-12-09T15:34:11,247 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-09T15:34:11,248 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-09T15:34:11,251 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-12-09T15:34:11,251 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-09T15:34:11,253 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-09T15:34:11,258 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-12-09T15:34:11,264 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-09T15:34:11,269 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-12-09T15:34:11,305 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-09T15:34:11,310 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-09T15:34:11,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-09T15:34:11,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:11,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-09T15:34:11,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:11,319 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3651d8ead0f3,33343,1733758449248, sessionid=0x1012bd2046e0000, setting cluster-up flag (Was=false)
2024-12-09T15:34:11,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:11,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:11,350 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-09T15:34:11,364 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,33343,1733758449248
2024-12-09T15:34:11,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:11,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:11,406 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-09T15:34:11,408 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,33343,1733758449248
2024-12-09T15:34:11,411 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-12-09T15:34:11,431 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-12-09T15:34:11,431 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-12-09T15:34:11,432 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-09T15:34:11,432 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3651d8ead0f3,33343,1733758449248 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-09T15:34:11,467 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5
2024-12-09T15:34:11,468 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5
2024-12-09T15:34:11,468 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5
2024-12-09T15:34:11,468 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5
2024-12-09T15:34:11,468 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3651d8ead0f3:0, corePoolSize=10, maxPoolSize=10
2024-12-09T15:34:11,468 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:11,468 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2
2024-12-09T15:34:11,468 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:11,508 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T15:34:11,508 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-12-09T15:34:11,518 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733758481518
2024-12-09T15:34:11,518 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-09T15:34:11,518 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-09T15:34:11,519 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-09T15:34:11,519 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-09T15:34:11,519 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-09T15:34:11,519 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-09T15:34:11,526 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:11,531 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-09T15:34:11,531 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-09T15:34:11,531 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-09T15:34:11,533 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-09T15:34:11,534 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-09T15:34:11,537 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758451534,5,FailOnTimeoutGroup]
2024-12-09T15:34:11,538 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(746): ClusterId : 65a842b6-7987-4d90-8006-5460414ffd44
2024-12-09T15:34:11,538 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-09T15:34:11,542 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758451537,5,FailOnTimeoutGroup]
2024-12-09T15:34:11,542 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:11,542 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-09T15:34:11,543 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:11,543 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:11,547 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:34:11,547 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T15:34:11,552 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T15:34:11,552 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T15:34:11,590 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T15:34:11,591 DEBUG [RS:0;3651d8ead0f3:44921 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e611917, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:34:11,629 DEBUG [RS:0;3651d8ead0f3:44921 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3651d8ead0f3:44921 2024-12-09T15:34:11,630 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T15:34:11,630 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T15:34:11,630 DEBUG [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T15:34:11,640 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(2659): reportForDuty to master=3651d8ead0f3,33343,1733758449248 with port=44921, startcode=1733758449658 2024-12-09T15:34:11,641 DEBUG [RS:0;3651d8ead0f3:44921 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T15:34:11,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:34:11,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38483 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:34:11,661 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48749, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T15:34:11,662 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T15:34:11,662 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0 2024-12-09T15:34:11,665 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33343 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3651d8ead0f3,44921,1733758449658 2024-12-09T15:34:11,665 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33343 {}] master.ServerManager(517): Registering regionserver=3651d8ead0f3,44921,1733758449658 2024-12-09T15:34:11,684 DEBUG [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0 2024-12-09T15:34:11,684 DEBUG [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35925 2024-12-09T15:34:11,684 DEBUG [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T15:34:11,686 DEBUG [RS:0;3651d8ead0f3:44921 {}] zookeeper.ZKUtil(111): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3651d8ead0f3,44921,1733758449658 2024-12-09T15:34:11,686 WARN [RS:0;3651d8ead0f3:44921 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T15:34:11,686 INFO [RS:0;3651d8ead0f3:44921 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:34:11,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T15:34:11,686 DEBUG [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658 2024-12-09T15:34:11,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T15:34:11,689 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T15:34:11,699 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3651d8ead0f3,44921,1733758449658] 2024-12-09T15:34:11,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:11,715 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T15:34:11,720 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T15:34:11,722 INFO [RS:0;3651d8ead0f3:44921 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T15:34:11,723 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:11,726 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T15:34:11,730 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T15:34:11,731 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
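The RecoverLeaseFSUtils WARN above (it recurs below for the other leftover WAL files of the earlier mini-cluster at hdfs://localhost:38871) has a characteristic shape: isFileClosed is invoked through reflection, so the real failure, an IOException("Filesystem closed") from an already-shut-down DFSClient, surfaces only as the cause of an InvocationTargetException. A self-contained sketch of that wrapping behavior; the demo class and method are illustrative, not HBase code:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrapDemo {
  // Stand-in for DFSClient.isFileClosed(): fails once the client is closed.
  public boolean isFileClosed(String path) throws IOException {
    throw new IOException("Filesystem closed");
  }

  public static void main(String[] args) throws Exception {
    Method m = ReflectionWrapDemo.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(new ReflectionWrapDemo(), "/some/wal/file");
    } catch (InvocationTargetException e) {
      // Reflection reports the wrapper; the informative error is the cause,
      // exactly as in the "Caused by:" chain of the stack traces above.
      System.out.println("wrapper: " + e);
      System.out.println("cause:   " + e.getCause());
    }
  }
}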
2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,731 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,732 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,732 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:34:11,732 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:34:11,732 DEBUG [RS:0;3651d8ead0f3:44921 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:34:11,733 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:11,733 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:11,733 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:11,733 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
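Every executor above is created with corePoolSize equal to maxPoolSize (1 for region open/close, 2 for log replay, 3 for snapshot and flush operations), i.e. a fixed-size pool per operation type. A minimal sketch of the same pattern with a plain ThreadPoolExecutor; the queue choice, thread factory, and names are illustrative rather than HBase's own ExecutorService wiring:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class FixedPoolSketch {
  static ThreadPoolExecutor fixedPool(String name, int size) {
    // corePoolSize == maximumPoolSize, as in the log lines above,
    // so the pool neither grows nor shrinks under load.
    return new ThreadPoolExecutor(size, size, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(),
        r -> new Thread(r, name));
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = fixedPool("RS_OPEN_REGION", 1);
    ThreadPoolExecutor snapshotOps = fixedPool("RS_SNAPSHOT_OPERATIONS", 3);
    openRegion.execute(() -> System.out.println("open region task"));
    openRegion.shutdown();
    snapshotOps.shutdown();
  }
}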
2024-12-09T15:34:11,734 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:11,734 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,44921,1733758449658-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T15:34:11,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38483 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:34:11,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:34:11,755 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:34:11,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:11,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:34:11,768 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T15:34:11,774 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,44921,1733758449658-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:11,775 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:11,775 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.Replication(171): 3651d8ead0f3,44921,1733758449658 started 2024-12-09T15:34:11,776 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:34:11,776 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:34:11,776 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:34:11,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:34:11,783 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:34:11,783 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:34:11,792 INFO [StoreOpener-1588230740-1 {}]
regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:34:11,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:34:11,798 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:11,798 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(1482): Serving as 3651d8ead0f3,44921,1733758449658, RpcServer on 3651d8ead0f3/172.17.0.3:44921, sessionid=0x1012bd2046e0001 2024-12-09T15:34:11,798 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T15:34:11,798 DEBUG [RS:0;3651d8ead0f3:44921 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3651d8ead0f3,44921,1733758449658 2024-12-09T15:34:11,798 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,44921,1733758449658' 2024-12-09T15:34:11,798 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T15:34:11,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:34:11,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:34:11,800 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T15:34:11,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:34:11,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:34:11,800 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T15:34:11,800 DEBUG [RS:0;3651d8ead0f3:44921 {}] 
procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T15:34:11,800 DEBUG [RS:0;3651d8ead0f3:44921 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3651d8ead0f3,44921,1733758449658 2024-12-09T15:34:11,801 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,44921,1733758449658' 2024-12-09T15:34:11,801 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T15:34:11,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:34:11,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:34:11,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:34:11,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:34:11,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740 2024-12-09T15:34:11,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740 2024-12-09T15:34:11,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T15:34:11,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:34:11,814 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
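The FlushLargeStoresPolicy message above describes its fallback: absent an explicit hbase.hregion.percolumnfamilyflush.size.lower.bound, the per-family lower bound is the region's memstore flush heap size divided by the number of families, which for the four families of hbase:meta comes out to the quoted 16.0 M. The arithmetic spelled out; the 64 MB total is inferred from the log, not printed by it:

public class FlushBoundSketch {
  public static void main(String[] args) {
    long memStoreFlushHeapSize = 64L * 1024 * 1024; // implied: 4 families x 16 MB
    int numFamilies = 4;                            // info, ns, rep_barrier, table
    long lowerBound = memStoreFlushHeapSize / numFamilies;
    System.out.println(lowerBound / (1024 * 1024) + " MB per family"); // 16 MB
  }
}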
2024-12-09T15:34:11,814 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T15:34:11,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:34:11,820 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:34:11,821 DEBUG [RS:0;3651d8ead0f3:44921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T15:34:11,821 INFO [RS:0;3651d8ead0f3:44921 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T15:34:11,821 INFO [RS:0;3651d8ead0f3:44921 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T15:34:11,821 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708724, jitterRate=-0.09881199896335602}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:34:11,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733758451755Initializing all the Stores at 1733758451767 (+12 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758451767Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758451767Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758451767Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758451767Cleaning up temporary data from old regions at 1733758451813 (+46 ms)Region opened successfully at 1733758451822 (+9 ms) 2024-12-09T15:34:11,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T15:34:11,822 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T15:34:11,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T15:34:11,822 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T15:34:11,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T15:34:11,823 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T15:34:11,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758451822Disabling compacts and flushes for region at 1733758451822Disabling writes for close at 1733758451822Writing region close event to WAL at 1733758451823 (+1 ms)Closed at 1733758451823 2024-12-09T15:34:11,824 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:34:11,824 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T15:34:11,825 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T15:34:11,826 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T15:34:11,828 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T15:34:11,927 INFO [RS:0;3651d8ead0f3:44921 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C44921%2C1733758449658, suffix=, logDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658, archiveDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/oldWALs, maxLogs=32 2024-12-09T15:34:11,932 INFO [RS:0;3651d8ead0f3:44921 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C44921%2C1733758449658.1733758451932 2024-12-09T15:34:11,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:11,979 WARN [3651d8ead0f3:33343 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T15:34:11,998 INFO [RS:0;3651d8ead0f3:44921 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 2024-12-09T15:34:12,039 DEBUG [RS:0;3651d8ead0f3:44921 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40151:40151),(127.0.0.1/127.0.0.1:32867:32867)] 2024-12-09T15:34:12,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:12,229 DEBUG [3651d8ead0f3:33343 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T15:34:12,231 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3651d8ead0f3,44921,1733758449658 2024-12-09T15:34:12,243 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,44921,1733758449658, state=OPENING 2024-12-09T15:34:12,250 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T15:34:12,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:34:12,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:34:12,256 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:34:12,257 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:34:12,257 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T15:34:12,258 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,44921,1733758449658}] 2024-12-09T15:34:12,421 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T15:34:12,425 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44191, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T15:34:12,442 INFO
[RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T15:34:12,443 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:34:12,446 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C44921%2C1733758449658.meta, suffix=.meta, logDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658, archiveDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/oldWALs, maxLogs=32 2024-12-09T15:34:12,451 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta 2024-12-09T15:34:12,523 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta 2024-12-09T15:34:12,553 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40151:40151),(127.0.0.1/127.0.0.1:32867:32867)] 2024-12-09T15:34:12,578 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:34:12,579 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T15:34:12,579 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T15:34:12,579 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
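The coprocessor load above resolves the table attribute printed earlier, coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', whose pipe-delimited fields are jar path (empty, hence "path null"), class name, and priority. A minimal sketch of declaring the same coprocessor on a descriptor through the public client API; the table name 'demo' and the sketch class are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorSketch {
  public static void main(String[] args) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setCoprocessor(CoprocessorDescriptorBuilder
            .newBuilder("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setPriority(536870911) // the priority shown in the log
            .build())
        .build();
    System.out.println(td.getCoprocessorDescriptors());
  }
}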
2024-12-09T15:34:12,579 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T15:34:12,579 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:34:12,579 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T15:34:12,579 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T15:34:12,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:34:12,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:34:12,607 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:34:12,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:34:12,615 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:34:12,623 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:34:12,623 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:34:12,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:34:12,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:34:12,635 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:34:12,635 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:34:12,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:34:12,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:34:12,662 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:34:12,662 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:34:12,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
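The CompactionConfiguration line repeated for each family above prints the selection knobs: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2 (5.0 off-peak). The heart of ratio-based selection is a single test: a store file is eligible when its size is at most ratio times the sum of the files that would be compacted along with it. A simplified sketch of just that test, not of ExploringCompactionPolicy's full candidate search:

import java.util.List;

public class RatioSelectionSketch {
  // True if sizes.get(i) passes the ratio test against the files after it
  // in a list sorted largest/oldest-first, mirroring the "ratio 1.200000"
  // knob printed in the log above.
  static boolean passesRatio(List<Long> sizes, int i, double ratio) {
    long sumNewer = 0;
    for (int j = i + 1; j < sizes.size(); j++) {
      sumNewer += sizes.get(j);
    }
    return sizes.get(i) <= ratio * sumNewer;
  }

  public static void main(String[] args) {
    List<Long> sizes = List.of(100L, 30L, 25L, 20L); // store file sizes
    System.out.println(passesRatio(sizes, 0, 1.2)); // false: 100 > 1.2 * 75
    System.out.println(passesRatio(sizes, 1, 1.2)); // true:  30 <= 1.2 * 45
  }
}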
2024-12-09T15:34:12,664 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:34:12,672 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740 2024-12-09T15:34:12,687 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740 2024-12-09T15:34:12,693 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T15:34:12,693 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:34:12,695 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T15:34:12,699 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:34:12,707 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696750, jitterRate=-0.1140373945236206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:34:12,707 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T15:34:12,708 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733758452580Writing region info on filesystem at 1733758452580Initializing all the Stores at 1733758452594 (+14 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758452595 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758452599 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758452599Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758452599Cleaning up temporary data from old regions at 1733758452693 (+94 ms)Running coprocessor post-open hooks at 1733758452707 (+14 ms)Region opened successfully at 1733758452708 (+1 ms) 2024-12-09T15:34:12,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:12,715 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733758452421 2024-12-09T15:34:12,731 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T15:34:12,731 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T15:34:12,746 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,44921,1733758449658 2024-12-09T15:34:12,751 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,44921,1733758449658, state=OPEN 2024-12-09T15:34:12,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:12,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:34:12,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:34:12,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:34:12,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:34:12,761 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3651d8ead0f3,44921,1733758449658 2024-12-09T15:34:12,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T15:34:12,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,44921,1733758449658 in 504 msec 2024-12-09T15:34:12,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T15:34:12,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 944 msec 2024-12-09T15:34:12,794 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:34:12,794 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T15:34:12,811 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T15:34:12,811 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,44921,1733758449658, seqNum=-1] 2024-12-09T15:34:12,811 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T15:34:12,819 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41891, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T15:34:12,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4500 sec 2024-12-09T15:34:12,878 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733758452878, completionTime=-1 2024-12-09T15:34:12,878 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T15:34:12,879 DEBUG
[master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T15:34:12,884 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T15:34:12,884 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733758512884 2024-12-09T15:34:12,884 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733758572884 2024-12-09T15:34:12,884 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 5 msec 2024-12-09T15:34:12,884 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,33343,1733758449248-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:12,884 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,33343,1733758449248-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:12,884 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,33343,1733758449248-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:12,885 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3651d8ead0f3:33343, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:12,885 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:12,887 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T15:34:12,889 DEBUG [master/3651d8ead0f3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T15:34:12,894 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.961sec 2024-12-09T15:34:12,894 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T15:34:12,895 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T15:34:12,895 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T15:34:12,895 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T15:34:12,895 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-09T15:34:12,895 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,33343,1733758449248-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T15:34:12,895 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,33343,1733758449248-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-09T15:34:12,913 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-12-09T15:34:12,913 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-09T15:34:12,913 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,33343,1733758449248-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:12,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6beed7e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:34:12,951 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3651d8ead0f3,33343,-1 for getting cluster id
2024-12-09T15:34:12,951 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-09T15:34:12,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:12,965 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '65a842b6-7987-4d90-8006-5460414ffd44'
2024-12-09T15:34:12,966 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-09T15:34:12,966 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "65a842b6-7987-4d90-8006-5460414ffd44"
2024-12-09T15:34:12,967 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@372ab5ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:34:12,967 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3651d8ead0f3,33343,-1]
2024-12-09T15:34:12,967 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-09T15:34:12,970 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:34:12,975 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33250, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-09T15:34:12,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61726e31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:34:12,982 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T15:34:12,989 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,44921,1733758449658, seqNum=-1]
2024-12-09T15:34:12,989 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T15:34:13,003 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40096, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T15:34:13,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3651d8ead0f3,33343,1733758449248
2024-12-09T15:34:13,014 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:13,023 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-12-09T15:34:13,023 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart
2024-12-09T15:34:13,023 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2
2024-12-09T15:34:13,023 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-12-09T15:34:13,024 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 3651d8ead0f3,33343,1733758449248
2024-12-09T15:34:13,025 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@738894c4
2024-12-09T15:34:13,025 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-09T15:34:13,030 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33256, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-09T15:34:13,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33343 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-09T15:34:13,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33343 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-12-09T15:34:13,031 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33343 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T15:34:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33343 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart
2024-12-09T15:34:13,039 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION
2024-12-09T15:34:13,039 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:13,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33343 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4
2024-12-09T15:34:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33343 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T15:34:13,044 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-09T15:34:13,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741835_1011 (size=395)
2024-12-09T15:34:13,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38483 is added to blk_1073741835_1011 (size=395)
2024-12-09T15:34:13,072 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5bd5b4f56ae618186e84cb72daaf6e80, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0
2024-12-09T15:34:13,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38483 is added to blk_1073741836_1012 (size=78)
2024-12-09T15:34:13,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36237 is added to blk_1073741836_1012 (size=78)
2024-12-09T15:34:13,143 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:34:13,143 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 5bd5b4f56ae618186e84cb72daaf6e80, disabling compactions & flushes
2024-12-09T15:34:13,143 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:13,143 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:13,143 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80. after waiting 0 ms
2024-12-09T15:34:13,143 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:13,143 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:13,143 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5bd5b4f56ae618186e84cb72daaf6e80: Waiting for close lock at 1733758453143Disabling compacts and flushes for region at 1733758453143Disabling writes for close at 1733758453143Writing region close event to WAL at 1733758453143Closed at 1733758453143
2024-12-09T15:34:13,148 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META
2024-12-09T15:34:13,149 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733758453148"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733758453148"}]},"ts":"1733758453148"}
2024-12-09T15:34:13,163 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-12-09T15:34:13,168 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-09T15:34:13,169 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758453168"}]},"ts":"1733758453168"}
2024-12-09T15:34:13,185 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta
2024-12-09T15:34:13,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5bd5b4f56ae618186e84cb72daaf6e80, ASSIGN}]
2024-12-09T15:34:13,195 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5bd5b4f56ae618186e84cb72daaf6e80, ASSIGN
2024-12-09T15:34:13,199 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5bd5b4f56ae618186e84cb72daaf6e80, ASSIGN; state=OFFLINE, location=3651d8ead0f3,44921,1733758449658; forceNewPlan=false, retain=false
2024-12-09T15:34:13,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:13,350 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5bd5b4f56ae618186e84cb72daaf6e80, regionState=OPENING, regionLocation=3651d8ead0f3,44921,1733758449658
2024-12-09T15:34:13,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5bd5b4f56ae618186e84cb72daaf6e80, ASSIGN because future has completed
2024-12-09T15:34:13,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5bd5b4f56ae618186e84cb72daaf6e80, server=3651d8ead0f3,44921,1733758449658}]
2024-12-09T15:34:13,533 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:13,534 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5bd5b4f56ae618186e84cb72daaf6e80, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.', STARTKEY => '', ENDKEY => ''}
2024-12-09T15:34:13,534 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,535 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:34:13,535 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,535 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,540 INFO [StoreOpener-5bd5b4f56ae618186e84cb72daaf6e80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,542 INFO [StoreOpener-5bd5b4f56ae618186e84cb72daaf6e80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5bd5b4f56ae618186e84cb72daaf6e80 columnFamilyName info
2024-12-09T15:34:13,543 DEBUG [StoreOpener-5bd5b4f56ae618186e84cb72daaf6e80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:13,544 INFO [StoreOpener-5bd5b4f56ae618186e84cb72daaf6e80-1 {}] regionserver.HStore(327): Store=5bd5b4f56ae618186e84cb72daaf6e80/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:34:13,545 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,545 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/default/TestLogRolling-testLogRollOnPipelineRestart/5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,546 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/default/TestLogRolling-testLogRollOnPipelineRestart/5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,547 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,547 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,552 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,560 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/default/TestLogRolling-testLogRollOnPipelineRestart/5bd5b4f56ae618186e84cb72daaf6e80/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T15:34:13,561 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5bd5b4f56ae618186e84cb72daaf6e80; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732307, jitterRate=-0.06882350146770477}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-09T15:34:13,561 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:13,562 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5bd5b4f56ae618186e84cb72daaf6e80: Running coprocessor pre-open hook at 1733758453535Writing region info on filesystem at 1733758453535Initializing all the Stores at 1733758453540 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758453540Cleaning up temporary data from old regions at 1733758453547 (+7 ms)Running coprocessor post-open hooks at 1733758453561 (+14 ms)Region opened successfully at 1733758453562 (+1 ms)
2024-12-09T15:34:13,580 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80., pid=6, masterSystemTime=1733758453523
2024-12-09T15:34:13,593 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:13,593 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:13,595 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5bd5b4f56ae618186e84cb72daaf6e80, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,44921,1733758449658
2024-12-09T15:34:13,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5bd5b4f56ae618186e84cb72daaf6e80, server=3651d8ead0f3,44921,1733758449658 because future has completed
2024-12-09T15:34:13,637 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-12-09T15:34:13,643 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5bd5b4f56ae618186e84cb72daaf6e80, server=3651d8ead0f3,44921,1733758449658 in 264 msec
2024-12-09T15:34:13,646 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-12-09T15:34:13,646 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5bd5b4f56ae618186e84cb72daaf6e80, ASSIGN in 452 msec
2024-12-09T15:34:13,649 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-09T15:34:13,649 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758453649"}]},"ts":"1733758453649"}
2024-12-09T15:34:13,655 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta
2024-12-09T15:34:13,656 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION
2024-12-09T15:34:13,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 625 msec
2024-12-09T15:34:13,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:16,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:17,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
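The recurring WARN above is the WAL close path probing lease recovery: RecoverLeaseFSUtils invokes DistributedFileSystem#isFileClosed reflectively (the Method.invoke frame at RecoverLeaseFSUtils.java:254), so when DFSClient.checkOpen finds the client already shut down, the resulting IOException("Filesystem closed") surfaces wrapped in a java.lang.reflect.InvocationTargetException. A minimal, self-contained Java sketch of that wrapping behavior follows; FakeFs and its isFileClosed method are hypothetical stand-ins for the filesystem, not the HDFS API.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveIsFileClosedSketch {

    // Hypothetical stand-in for DistributedFileSystem; only the failure shape matters.
    public static class FakeFs {
        public boolean isFileClosed(String path) throws IOException {
            // Mirrors DFSClient.checkOpen() rejecting calls once the client is closed.
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method isFileClosed = FakeFs.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(new FakeFs(), "/some/wal/path");
        } catch (InvocationTargetException e) {
            // Method.invoke wraps the checked IOException, which is why the log pairs
            // InvocationTargetException with "Filesystem closed" as the cause.
            System.out.println("wrapped: " + e);            // java.lang.reflect.InvocationTargetException
            System.out.println("cause:   " + e.getCause()); // java.io.IOException: Filesystem closed
        }
    }
}

The ": null" in the logged "InvocationTargetException: null" is just the wrapper's empty message; the actionable detail is the "Filesystem closed" cause.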
2024-12-09T15:34:17,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[same WARN repeated 9 more times between 15:34:17,595 and 15:34:17,602]
2024-12-09T15:34:17,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:34:17,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
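The FsDatasetImpl entries above are a different symptom of the same teardown: metric collection runs after the DataNode dataset's executor map has been nulled out, and the caught exception's message is the JDK's helpful NullPointerException text (JEP 358, enabled by default on recent JDKs). A minimal sketch reproducing that message shape; the class and field here are hypothetical stand-ins, not the Hadoop internals.

import java.util.Map;

public class HelpfulNpeSketch {

    // Hypothetical stand-in for the executor registry; never initialized, stays null.
    private Map<String, Runnable> executors;

    int countExecutors() {
        // Dereferencing the null field throws an NPE whose message names both the
        // failed call and the null receiver, as in the log entries above.
        return executors.values().size();
    }

    public static void main(String[] args) {
        try {
            new HelpfulNpeSketch().countExecutors();
        } catch (NullPointerException e) {
            // Prints: Cannot invoke "java.util.Map.values()" because "this.executors" is null
            System.out.println(e.getMessage());
        }
    }
}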
2024-12-09T15:34:17,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:34:18,109 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T15:34:18,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[same WARN repeated 9 more times between 15:34:18,125 and 15:34:18,135]
2024-12-09T15:34:18,142 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-09T15:34:18,143 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-12-09T15:34:18,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:34:18,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:34:18,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:34:18,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:34:19,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:34:19,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:34:19,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:34:19,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:34:20,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:34:20,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:34:20,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:34:20,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:34:21,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:34:21,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T15:34:21,689 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-09T15:34:21,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-12-09T15:34:21,690 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-12-09T15:34:21,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:34:21,690 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-09T15:34:21,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:21,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:21,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:22,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:22,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:22,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:22,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:23,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33343 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T15:34:23,084 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-12-09T15:34:23,084 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-12-09T15:34:23,088 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-12-09T15:34:23,088 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:23,093 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80., hostname=3651d8ead0f3,44921,1733758449658, seqNum=2]
2024-12-09T15:34:23,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:23,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:23,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:23,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:24,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:24,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:24,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:24,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:25,096 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932
2024-12-09T15:34:25,097 WARN [ResponseProcessor for block BP-871040900-172.17.0.3-1733758443620:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-871040900-172.17.0.3-1733758443620:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:25,097 WARN [ResponseProcessor for block BP-871040900-172.17.0.3-1733758443620:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-871040900-172.17.0.3-1733758443620:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:25,097 WARN [ResponseProcessor for block BP-871040900-172.17.0.3-1733758443620:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-871040900-172.17.0.3-1733758443620:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:25,098 WARN [DataStreamer for file /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta block BP-871040900-172.17.0.3-1733758443620:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-871040900-172.17.0.3-1733758443620:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38483,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK], DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38483,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK]) is bad.
2024-12-09T15:34:25,098 WARN [DataStreamer for file /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 block BP-871040900-172.17.0.3-1733758443620:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-871040900-172.17.0.3-1733758443620:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38483,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK], DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38483,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK]) is bad.
2024-12-09T15:34:25,098 WARN [DataStreamer for file /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758450964 block BP-871040900-172.17.0.3-1733758443620:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-871040900-172.17.0.3-1733758443620:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38483,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK], DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38483,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK]) is bad.
2024-12-09T15:34:25,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1780105731_22 at /127.0.0.1:36798 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38483:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36798 dst: /127.0.0.1:38483
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:25,099 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_422921290_22 at /127.0.0.1:36760 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38483:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36760 dst: /127.0.0.1:38483
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:25,099 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1780105731_22 at /127.0.0.1:36782 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38483:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36782 dst: /127.0.0.1:38483
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:25,099 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1780105731_22 at /127.0.0.1:57946 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57946 dst: /127.0.0.1:36237
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:25,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1780105731_22 at /127.0.0.1:57932 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57932 dst: /127.0.0.1:36237
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:25,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_422921290_22 at /127.0.0.1:57898 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57898 dst: /127.0.0.1:36237
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:25,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@37687418{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:34:25,125 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e7f214b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:34:25,125 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:34:25,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47946b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:34:25,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64434c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,STOPPED}
2024-12-09T15:34:25,134 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:34:25,134 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:34:25,134 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-871040900-172.17.0.3-1733758443620 (Datanode Uuid c9b088f4-8ae4-4179-83dc-8e29ab739952) service to localhost/127.0.0.1:35925
2024-12-09T15:34:25,134 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:34:25,135 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data4/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:25,136 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:34:25,137 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data3/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:25,191 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:34:25,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:34:25,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:34:25,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:34:25,211 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:34:25,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2460467f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:34:25,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75f58649{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:34:25,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:25,389 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c5aa216{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/java.io.tmpdir/jetty-localhost-33381-hadoop-hdfs-3_4_1-tests_jar-_-any-15356307060593390193/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:34:25,390 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@46332285{HTTP/1.1, (http/1.1)}{localhost:33381}
2024-12-09T15:34:25,390 INFO [Time-limited test {}] server.Server(415): Started @191041ms
2024-12-09T15:34:25,391 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T15:34:25,463 WARN [ResponseProcessor for block BP-871040900-172.17.0.3-1733758443620:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-871040900-172.17.0.3-1733758443620:blk_1073741830_1015
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:25,463 WARN [ResponseProcessor for block BP-871040900-172.17.0.3-1733758443620:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-871040900-172.17.0.3-1733758443620:blk_1073741834_1013
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:25,463 WARN [ResponseProcessor for block BP-871040900-172.17.0.3-1733758443620:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-871040900-172.17.0.3-1733758443620:blk_1073741833_1014
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:25,464 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1780105731_22 at /127.0.0.1:33914 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33914 dst: /127.0.0.1:36237 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:34:25,465 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1780105731_22 at /127.0.0.1:33910 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33910 dst: /127.0.0.1:36237 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:34:25,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_422921290_22 at /127.0.0.1:33928 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33928 dst: /127.0.0.1:36237 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:34:25,475 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fb42f9a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:34:25,482 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d74671d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T15:34:25,483 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T15:34:25,483 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13646a74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T15:34:25,483 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fc8bed8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,STOPPED} 2024-12-09T15:34:25,502 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T15:34:25,503 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-871040900-172.17.0.3-1733758443620 (Datanode Uuid fdd28ed2-4e85-4af5-82a0-58145929e994) service to localhost/127.0.0.1:35925 2024-12-09T15:34:25,506 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T15:34:25,506 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T15:34:25,507 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data1/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:34:25,507 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data2/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:34:25,509 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T15:34:25,530 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:34:25,544 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:34:25,558 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:34:25,558 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:34:25,558 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T15:34:25,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75ed142f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:34:25,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ede944f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:34:25,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:25,747 WARN [Thread-1357 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T15:34:25,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:25,785 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f3563521d175e14 with lease ID 0xfe62ee5b35e6e232: from storage DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4 node DatanodeRegistration(127.0.0.1:36345, datanodeUuid=c9b088f4-8ae4-4179-83dc-8e29ab739952, infoPort=35571, infoSecurePort=0, ipcPort=36981, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:25,785 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f3563521d175e14 with lease ID 0xfe62ee5b35e6e232: from storage DS-9d31ca8d-2ec6-4866-975c-10c55c32c3b1 node DatanodeRegistration(127.0.0.1:36345, datanodeUuid=c9b088f4-8ae4-4179-83dc-8e29ab739952, infoPort=35571, infoSecurePort=0, ipcPort=36981, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:25,839 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7180ac25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/java.io.tmpdir/jetty-localhost-40841-hadoop-hdfs-3_4_1-tests_jar-_-any-6388860308205723934/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:34:25,845 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@248d0d6a{HTTP/1.1, (http/1.1)}{localhost:40841} 2024-12-09T15:34:25,845 INFO [Time-limited test {}] server.Server(415): Started @191496ms 2024-12-09T15:34:25,847 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T15:34:25,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:26,029 WARN [Thread-1388 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T15:34:26,039 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4e846d5c515f616 with lease ID 0xfe62ee5b35e6e233: from storage DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30 node DatanodeRegistration(127.0.0.1:37105, datanodeUuid=fdd28ed2-4e85-4af5-82a0-58145929e994, infoPort=35671, infoSecurePort=0, ipcPort=41457, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:26,039 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4e846d5c515f616 with lease ID 0xfe62ee5b35e6e233: from storage DS-35b94267-44e8-4623-8338-6ee2b9131c03 node DatanodeRegistration(127.0.0.1:37105, datanodeUuid=fdd28ed2-4e85-4af5-82a0-58145929e994, infoPort=35671, infoSecurePort=0, ipcPort=41457, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T15:34:26,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:26,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:26,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:26,931 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-09T15:34:26,934 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-09T15:34:26,936 ERROR [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0-prefix:3651d8ead0f3,44921,1733758449658 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:34:26,936 WARN [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0-prefix:3651d8ead0f3,44921,1733758449658 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:34:26,936 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C44921%2C1733758449658:(num 1733758451932) roll requested 2024-12-09T15:34:26,937 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C44921%2C1733758449658.1733758466936 2024-12-09T15:34:26,981 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 newFile=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 2024-12-09T15:34:26,981 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:26,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:26,982 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:26,982 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:26,983 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:26,983 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:26,983 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 2024-12-09T15:34:26,985 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:34:26,986 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:34:26,986 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 2024-12-09T15:34:26,988 WARN [IPC Server handler 4 on default port 35925 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-12-09T15:34:26,994 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 after 8ms 2024-12-09T15:34:27,006 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35671:35671),(127.0.0.1/127.0.0.1:35571:35571)] 2024-12-09T15:34:27,007 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 is not closed yet, will try archiving it next time 2024-12-09T15:34:27,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:27,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:27,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:27,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 after 68139ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:34:27,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:34:28,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:28,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:28,768 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T15:34:28,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:28,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:29,025 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-09T15:34:29,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:29,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:29,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:29,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:30,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:30,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:30,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:30,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:30,996 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 after 4010ms 2024-12-09T15:34:31,029 WARN [ResponseProcessor for block BP-871040900-172.17.0.3-1733758443620:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-871040900-172.17.0.3-1733758443620:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:34:31,030 WARN [DataStreamer for file /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 block BP-871040900-172.17.0.3-1733758443620:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-871040900-172.17.0.3-1733758443620:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37105,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK], DatanodeInfoWithStorage[127.0.0.1:36345,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37105,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]) is bad. 
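The recurring "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" blocks above are RecoverLeaseFSUtils polling lease recovery through reflection: the reflective isFileClosed call (visible as GeneratedMethodAccessor117 / Method.invoke in each trace) keeps being retried against a FileSystem whose underlying DFSClient has already been shut down, so every attempt fails DFSClient.checkOpen(). A minimal sketch of the recover-then-poll pattern using the public DistributedFileSystem API (the reflection inside RecoverLeaseFSUtils exists only for compatibility with older Hadoop releases; the sleep interval here is illustrative):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch: ask the NameNode to recover the WAL file's lease, then poll
    // until the file is closed. Mirrors the call chain in the traces above:
    // recoverFileLease -> recoverDFSFileLease -> isFileClosed.
    static void recoverWalLease(DistributedFileSystem dfs, Path wal) throws Exception {
      boolean closed = dfs.recoverLease(wal); // true if the file is already closed
      while (!closed) {
        Thread.sleep(1000); // illustrative back-off between attempts
        // This is the call the reflective invocation targets; once the client
        // is shut down it throws IOException("Filesystem closed"), which the
        // utility logs wrapped in an InvocationTargetException, as seen above.
        closed = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
      }
    }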
2024-12-09T15:34:31,030 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1780105731_22 at /127.0.0.1:41730 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36345:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41730 dst: /127.0.0.1:36345 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:34:31,031 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1780105731_22 at /127.0.0.1:35640 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37105:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35640 dst: /127.0.0.1:37105 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:34:31,038 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-871040900-172.17.0.3-1733758443620 (Datanode Uuid fdd28ed2-4e85-4af5-82a0-58145929e994) service to localhost/127.0.0.1:35925 2024-12-09T15:34:31,040 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data2/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:34:31,040 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data1/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:34:31,041 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7180ac25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:34:31,042 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@248d0d6a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T15:34:31,042 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T15:34:31,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ede944f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T15:34:31,046 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75ed142f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,STOPPED} 2024-12-09T15:34:31,064 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T15:34:31,110 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:34:31,114 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:34:31,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:34:31,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:34:31,117 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T15:34:31,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19016e01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:34:31,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ef50a45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:34:31,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:31,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6db938{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/java.io.tmpdir/jetty-localhost-36719-hadoop-hdfs-3_4_1-tests_jar-_-any-13709575366400512199/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:34:31,287 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cf288ef{HTTP/1.1, (http/1.1)}{localhost:36719} 2024-12-09T15:34:31,287 INFO [Time-limited test {}] server.Server(415): Started @196938ms 2024-12-09T15:34:31,288 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T15:34:31,326 WARN [ResponseProcessor for block BP-871040900-172.17.0.3-1733758443620:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-871040900-172.17.0.3-1733758443620:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:34:31,327 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1780105731_22 at /127.0.0.1:47576 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36345:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47576 dst: /127.0.0.1:36345 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:34:31,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c5aa216{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:34:31,334 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@46332285{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T15:34:31,334 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T15:34:31,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75f58649{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T15:34:31,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2460467f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,STOPPED} 2024-12-09T15:34:31,337 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T15:34:31,337 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
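The datanode churn around this point (Jetty handlers stopped and restarted, "Ending block pool service" warnings, interrupted refresh and command-processor threads) is consistent with the test bouncing its datanodes; the test itself later logs "Data Nodes restarted". A hedged sketch of how such a restart is typically driven against a MiniDFSCluster; the method names are from the Hadoop test API, but exact signatures vary across Hadoop versions:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch: restart every datanode in place, then wait until they have
    // re-registered and sent block reports (the BLOCK* processReport lines
    // further down mark that point in this log).
    static void bounceDataNodes(MiniDFSCluster cluster) throws Exception {
      cluster.restartDataNodes(); // stop and restart all datanodes
      cluster.waitActive();       // block until the NameNode sees them again
    }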
2024-12-09T15:34:31,337 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-871040900-172.17.0.3-1733758443620 (Datanode Uuid c9b088f4-8ae4-4179-83dc-8e29ab739952) service to localhost/127.0.0.1:35925 2024-12-09T15:34:31,337 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T15:34:31,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data3/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:34:31,339 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data4/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:34:31,339 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T15:34:31,378 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:34:31,384 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:34:31,385 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:34:31,385 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:34:31,385 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T15:34:31,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c39138a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:34:31,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f3e5a16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:34:31,464 WARN [Thread-1431 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T15:34:31,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71fe1cd6bac20428 with lease ID 0xfe62ee5b35e6e234: from storage DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30 node DatanodeRegistration(127.0.0.1:45885, datanodeUuid=fdd28ed2-4e85-4af5-82a0-58145929e994, infoPort=34063, infoSecurePort=0, ipcPort=43695, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:31,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71fe1cd6bac20428 with lease ID 0xfe62ee5b35e6e234: from storage DS-35b94267-44e8-4623-8338-6ee2b9131c03 node DatanodeRegistration(127.0.0.1:45885, datanodeUuid=fdd28ed2-4e85-4af5-82a0-58145929e994, infoPort=34063, infoSecurePort=0, ipcPort=43695, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:31,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41b9791e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/java.io.tmpdir/jetty-localhost-36169-hadoop-hdfs-3_4_1-tests_jar-_-any-4018185880810880119/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:34:31,558 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1240a3cc{HTTP/1.1, (http/1.1)}{localhost:36169} 2024-12-09T15:34:31,558 INFO [Time-limited test {}] server.Server(415): Started @197209ms 2024-12-09T15:34:31,563 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T15:34:31,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:31,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:31,786 WARN [Thread-1462 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T15:34:31,812 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf9df4ee63baa26c with lease ID 0xfe62ee5b35e6e235: from storage DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4 node DatanodeRegistration(127.0.0.1:42259, datanodeUuid=c9b088f4-8ae4-4179-83dc-8e29ab739952, infoPort=34623, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T15:34:31,812 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf9df4ee63baa26c with lease ID 0xfe62ee5b35e6e235: from storage DS-9d31ca8d-2ec6-4866-975c-10c55c32c3b1 node DatanodeRegistration(127.0.0.1:42259, datanodeUuid=c9b088f4-8ae4-4179-83dc-8e29ab739952, infoPort=34623, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=431584924;c=1733758443620), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:31,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:34:32,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:32,627 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-09T15:34:32,630 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-09T15:34:32,632 ERROR [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0-prefix:3651d8ead0f3,44921,1733758449658 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36345,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
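The "All datanodes [...] are bad. Aborting..." failure just above is DataStreamer exhausting pipeline recovery: with a two-replica pipeline and both datanodes bounced, no surviving node is left to rebuild the pipeline around, so appendAndSync fails and, as the entries below show, the WAL is rolled onto a fresh block instead. For a client that has to ride out such windows, the replace-datanode-on-failure settings are the relevant knobs; a sketch using real HDFS client keys (the values shown are illustrative choices, not this test's configuration):

    import org.apache.hadoop.conf.Configuration;

    // Sketch: configure the DFS client to keep rebuilding a failed write
    // pipeline rather than aborting when replacement datanodes run out.
    static Configuration pipelineTolerantConf() {
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
      conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "ALWAYS");
      // best-effort lets the write continue even if no replacement is available
      conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
      return conf;
    }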
2024-12-09T15:34:32,632 WARN [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0-prefix:3651d8ead0f3,44921,1733758449658 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36345,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:34:32,632 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C44921%2C1733758449658:(num 1733758466936) roll requested 2024-12-09T15:34:32,632 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C44921%2C1733758449658.1733758472632 2024-12-09T15:34:32,640 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 newFile=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758472632 2024-12-09T15:34:32,640 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:32,640 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:32,640 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:32,640 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:32,640 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:32,641 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758472632 2024-12-09T15:34:32,641 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36345,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
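The sequence above is FSHLog's recovery path in miniature: the failed append marks the writer bad, the log roller requests a roll, the sync runners are interrupted, and a new writer is created on a healthy pipeline while the old file is handed to lease recovery (the "RecoveryId = 1020" line below). The same roll can be forced through the public Admin API; a minimal sketch, assuming an open Connection and a known ServerName:

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Sketch: force a WAL roll on one region server, equivalent in effect to
    // the roller-requested roll logged above (new writer on a new file; the
    // old file is closed or lease-recovered in the background).
    static void rollWal(Connection conn, ServerName server) throws Exception {
      try (Admin admin = conn.getAdmin()) {
        admin.rollWALWriter(server);
      }
    }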
2024-12-09T15:34:32,641 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36345,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:32,641 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936
2024-12-09T15:34:32,641 WARN [IPC Server handler 1 on default port 35925 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018
2024-12-09T15:34:32,642 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 after 1ms
2024-12-09T15:34:32,644 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34623:34623),(127.0.0.1/127.0.0.1:34063:34063)]
2024-12-09T15:34:32,644 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 is not closed yet, will try archiving it next time
2024-12-09T15:34:32,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:32,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:32,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:33,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:33,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:33,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:33,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:34,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:34,470 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-09T15:34:34,647 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C44921%2C1733758449658.1733758474646
2024-12-09T15:34:34,659 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758472632 newFile=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646
2024-12-09T15:34:34,659 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:34,659 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:34,662 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:34,662 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:34,663 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:34,663 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758472632 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646
2024-12-09T15:34:34,671 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34063:34063),(127.0.0.1/127.0.0.1:34623:34623)]
2024-12-09T15:34:34,671 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 is not closed yet, will try archiving it next time
2024-12-09T15:34:34,671 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758472632 is not closed yet, will try archiving it next time
2024-12-09T15:34:34,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741838_1019 (size=1264)
2024-12-09T15:34:34,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741838_1019 (size=1264)
2024-12-09T15:34:34,674 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932
2024-12-09T15:34:34,674 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932
2024-12-09T15:34:34,675 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 is not closed yet, will try archiving it next time
2024-12-09T15:34:34,679 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932 after 5ms
2024-12-09T15:34:34,679 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932
2024-12-09T15:34:34,707 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733758453562/Put/vlen=218/seqid=0]
2024-12-09T15:34:34,707 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733758463094/Put/vlen=1045/seqid=0]
2024-12-09T15:34:34,708 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758451932
2024-12-09T15:34:34,708 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936
2024-12-09T15:34:34,708 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936
2024-12-09T15:34:34,711 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 after 2ms
2024-12-09T15:34:34,711 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936
2024-12-09T15:34:34,715 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733758466936/Put/vlen=1045/seqid=0]
2024-12-09T15:34:34,716 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733758469027/Put/vlen=1045/seqid=0]
2024-12-09T15:34:34,716 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936
2024-12-09T15:34:34,716 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758472632
2024-12-09T15:34:34,716 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758472632
2024-12-09T15:34:34,716 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758472632 after 0ms
2024-12-09T15:34:34,716 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758472632
2024-12-09T15:34:34,720 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733758472631/Put/vlen=1045/seqid=0]
2024-12-09T15:34:34,720 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646
2024-12-09T15:34:34,721 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646
2024-12-09T15:34:34,721 WARN [IPC Server handler 0 on default port 35925 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021
2024-12-09T15:34:34,722 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 after 1ms
2024-12-09T15:34:34,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:34,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:34,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:35,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:35,471 WARN [ResponseProcessor for block BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
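[editor's note] The lease-recovery records above follow a simple retry pattern: attempt=0 fails while the NameNode reports "Lease recovery is in progress", and a later attempt logs "Recovered lease". A minimal sketch of that pattern using the public DistributedFileSystem API follows. RecoverLeaseFSUtils wraps essentially these calls; it reaches isFileClosed() reflectively, which is why, once the client is dead, the failures surface above as InvocationTargetException caused by "Filesystem closed". The class and method names below are illustrative, not HBase's actual implementation.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hedged sketch of the recover-then-poll loop visible in the log.
public final class LeaseRecoverySketch {
  public static void waitForLease(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    int attempt = 0;
    // recoverLease() returns true once the NameNode has closed the file.
    while (!dfs.recoverLease(wal) && !dfs.isFileClosed(wal)) {
      System.out.printf("Failed to recover lease, attempt=%d on file=%s%n", attempt++, wal);
      Thread.sleep(1000L); // back off before re-asking, as the utility does between attempts
    }
  }
}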
2024-12-09T15:34:35,471 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_422921290_22 at /127.0.0.1:45800 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45885:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45800 dst: /127.0.0.1:45885
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45885 remote=/127.0.0.1:45800]. Total timeout mills is 60000, 59185 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:35,471 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_422921290_22 at /127.0.0.1:38784 [Receiving block BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42259:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38784 dst: /127.0.0.1:42259
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:35,471 WARN [DataStreamer for file /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 block BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45885,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK], DatanodeInfoWithStorage[127.0.0.1:42259,DS-96a0f0d9-caaa-44e0-a1bd-cc706d4ae7b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45885,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]) is bad.
2024-12-09T15:34:35,472 WARN [DataStreamer for file /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 block BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:35,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741839_1022 (size=85)
2024-12-09T15:34:35,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741839_1022 (size=85)
2024-12-09T15:34:35,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:35,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:35,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:36,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:36,643 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758466936 after 4002ms
2024-12-09T15:34:36,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:36,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:36,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:37,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:37,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:37,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:37,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:38,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:38,710 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T15:34:38,723 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 after 4002ms 2024-12-09T15:34:38,723 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 2024-12-09T15:34:38,728 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 2024-12-09T15:34:38,728 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-09T15:34:38,729 ERROR [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0-prefix:3651d8ead0f3,44921,1733758449658.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:38,729 WARN [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0-prefix:3651d8ead0f3,44921,1733758449658.meta {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:38,729 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C44921%2C1733758449658.meta:.meta(num 1733758452451) roll requested
2024-12-09T15:34:38,729 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C44921%2C1733758449658.meta.1733758478729.meta
2024-12-09T15:34:38,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:38,754 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:38,754 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:38,758 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:38,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:38,759 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:38,759 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758478729.meta
2024-12-09T15:34:38,765 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:38,765 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:38,765 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta
2024-12-09T15:34:38,766 WARN [IPC Server handler 1 on default port 35925 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013
2024-12-09T15:34:38,771 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34063:34063),(127.0.0.1/127.0.0.1:34623:34623)]
2024-12-09T15:34:38,771 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta is not closed yet, will try archiving it next time
2024-12-09T15:34:38,774 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta after 9ms
2024-12-09T15:34:38,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:38,828 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/.tmp/info/0f1840f6feb54092a0120d19d5d66a4d is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80./info:regioninfo/1733758453595/Put/seqid=0
2024-12-09T15:34:38,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741841_1025 (size=7125)
2024-12-09T15:34:38,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741841_1025 (size=7125)
2024-12-09T15:34:38,848 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/.tmp/info/0f1840f6feb54092a0120d19d5d66a4d
2024-12-09T15:34:38,895 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/.tmp/ns/823be81c2d0f4f099ad5ce95bde12492 is 43, key is default/ns:d/1733758452821/Put/seqid=0
2024-12-09T15:34:38,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741842_1026 (size=5153)
2024-12-09T15:34:38,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741842_1026 (size=5153)
2024-12-09T15:34:38,941 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/.tmp/ns/823be81c2d0f4f099ad5ce95bde12492
2024-12-09T15:34:38,978 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/.tmp/table/4cb5b422288b4afb81ec4f259243cb0b is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733758453649/Put/seqid=0
2024-12-09T15:34:38,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:39,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741843_1027 (size=5438)
2024-12-09T15:34:39,026 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/.tmp/table/4cb5b422288b4afb81ec4f259243cb0b
2024-12-09T15:34:39,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741843_1027 (size=5438)
2024-12-09T15:34:39,041 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/.tmp/info/0f1840f6feb54092a0120d19d5d66a4d as hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/info/0f1840f6feb54092a0120d19d5d66a4d
2024-12-09T15:34:39,057 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/info/0f1840f6feb54092a0120d19d5d66a4d, entries=10, sequenceid=11, filesize=7.0 K
2024-12-09T15:34:39,058 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/.tmp/ns/823be81c2d0f4f099ad5ce95bde12492 as hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/ns/823be81c2d0f4f099ad5ce95bde12492
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/ns/823be81c2d0f4f099ad5ce95bde12492, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T15:34:39,073 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/.tmp/table/4cb5b422288b4afb81ec4f259243cb0b as hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/table/4cb5b422288b4afb81ec4f259243cb0b 2024-12-09T15:34:39,087 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/table/4cb5b422288b4afb81ec4f259243cb0b, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T15:34:39,096 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 368ms, sequenceid=11, compaction requested=false 2024-12-09T15:34:39,096 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T15:34:39,096 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 5bd5b4f56ae618186e84cb72daaf6e80 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-09T15:34:39,097 ERROR [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0-prefix:3651d8ead0f3,44921,1733758449658 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:34:39,097 WARN [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0-prefix:3651d8ead0f3,44921,1733758449658 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T15:34:39,098 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C44921%2C1733758449658:(num 1733758474646) roll requested 2024-12-09T15:34:39,098 INFO [regionserver/3651d8ead0f3:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C44921%2C1733758449658.1733758479098 2024-12-09T15:34:39,120 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 newFile=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758479098 2024-12-09T15:34:39,122 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:39,123 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:39,126 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:39,130 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:39,132 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:34:39,135 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758479098 2024-12-09T15:34:39,135 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
    at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:39,135 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-871040900-172.17.0.3-1733758443620:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:39,136 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646
2024-12-09T15:34:39,137 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 after 1ms
2024-12-09T15:34:39,146 DEBUG [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34623:34623),(127.0.0.1/127.0.0.1:34063:34063)]
2024-12-09T15:34:39,149 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.1733758474646 to hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/oldWALs/3651d8ead0f3%2C44921%2C1733758449658.1733758474646
2024-12-09T15:34:39,194 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/default/TestLogRolling-testLogRollOnPipelineRestart/5bd5b4f56ae618186e84cb72daaf6e80/.tmp/info/9a95e7e9d2844989873aebd24b4ee409 is 1080, key is row1002/info:/1733758463094/Put/seqid=0
2024-12-09T15:34:39,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741845_1029 (size=9270)
2024-12-09T15:34:39,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741845_1029 (size=9270)
2024-12-09T15:34:39,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:39,614 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/default/TestLogRolling-testLogRollOnPipelineRestart/5bd5b4f56ae618186e84cb72daaf6e80/.tmp/info/9a95e7e9d2844989873aebd24b4ee409
2024-12-09T15:34:39,626 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/default/TestLogRolling-testLogRollOnPipelineRestart/5bd5b4f56ae618186e84cb72daaf6e80/.tmp/info/9a95e7e9d2844989873aebd24b4ee409 as hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/default/TestLogRolling-testLogRollOnPipelineRestart/5bd5b4f56ae618186e84cb72daaf6e80/info/9a95e7e9d2844989873aebd24b4ee409
2024-12-09T15:34:39,638 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/default/TestLogRolling-testLogRollOnPipelineRestart/5bd5b4f56ae618186e84cb72daaf6e80/info/9a95e7e9d2844989873aebd24b4ee409, entries=4, sequenceid=8, filesize=9.1 K
2024-12-09T15:34:39,640 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 5bd5b4f56ae618186e84cb72daaf6e80 in 544ms, sequenceid=8, compaction requested=false
2024-12-09T15:34:39,640 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 5bd5b4f56ae618186e84cb72daaf6e80:
2024-12-09T15:34:39,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T15:34:39,647 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T15:34:39,647 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:34:39,647 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:34:39,648 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:34:39,648 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T15:34:39,648 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T15:34:39,648 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=27388695, stopped=false
2024-12-09T15:34:39,648 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3651d8ead0f3,33343,1733758449248
2024-12-09T15:34:39,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:34:39,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:39,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:34:39,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:39,651 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:34:39,651 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:34:39,651 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:34:39,652 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T15:34:39,652 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:34:39,652 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:34:39,652 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3651d8ead0f3,44921,1733758449658' *****
2024-12-09T15:34:39,652 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T15:34:39,652 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T15:34:39,653 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(3091): Received CLOSE for 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(959): stopping server 3651d8ead0f3,44921,1733758449658
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3651d8ead0f3:44921.
2024-12-09T15:34:39,653 DEBUG [RS:0;3651d8ead0f3:44921 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:34:39,653 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5bd5b4f56ae618186e84cb72daaf6e80, disabling compactions & flushes
2024-12-09T15:34:39,653 DEBUG [RS:0;3651d8ead0f3:44921 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:34:39,653 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T15:34:39,653 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T15:34:39,653 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80. after waiting 0 ms
2024-12-09T15:34:39,653 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:39,653 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T15:34:39,662 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-12-09T15:34:39,662 DEBUG [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5bd5b4f56ae618186e84cb72daaf6e80=TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.}
2024-12-09T15:34:39,662 DEBUG [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5bd5b4f56ae618186e84cb72daaf6e80
2024-12-09T15:34:39,662 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T15:34:39,662 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T15:34:39,663 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T15:34:39,663 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T15:34:39,663 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T15:34:39,693 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/default/TestLogRolling-testLogRollOnPipelineRestart/5bd5b4f56ae618186e84cb72daaf6e80/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1
2024-12-09T15:34:39,694 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:39,694 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5bd5b4f56ae618186e84cb72daaf6e80: Waiting for close lock at 1733758479653Running coprocessor pre-close hooks at 1733758479653Disabling compacts and flushes for region at 1733758479653Disabling writes for close at 1733758479653Writing region close event to WAL at 1733758479671 (+18 ms)Running coprocessor post-close hooks at 1733758479694 (+23 ms)Closed at 1733758479694
2024-12-09T15:34:39,694 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733758453030.5bd5b4f56ae618186e84cb72daaf6e80.
2024-12-09T15:34:39,714 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-09T15:34:39,726 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:34:39,727 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T15:34:39,727 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758479662Running coprocessor pre-close hooks at 1733758479662Disabling compacts and flushes for region at 1733758479662Disabling writes for close at 1733758479663 (+1 ms)Writing region close event to WAL at 1733758479702 (+39 ms)Running coprocessor post-close hooks at 1733758479726 (+24 ms)Closed at 1733758479726
2024-12-09T15:34:39,727 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T15:34:39,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:39,741 INFO [regionserver/3651d8ead0f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:34:39,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:39,815 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T15:34:39,815 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T15:34:39,862 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(976): stopping server 3651d8ead0f3,44921,1733758449658; all regions closed.
2024-12-09T15:34:39,863 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:39,863 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:39,863 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:39,864 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:39,864 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:39,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741840_1023 (size=825)
2024-12-09T15:34:39,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741840_1023 (size=825)
2024-12-09T15:34:39,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:40,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:40,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:40,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:40,802 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-09T15:34:40,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:41,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:41,592 WARN [BootstrapNodeManager {}] regionserver.BootstrapNodeManager(142): failed to get live region servers from master
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3651d8ead0f3:33343 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?]
    at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.BootstrapNodeManager.getFromMaster(BootstrapNodeManager.java:140) ~[classes/:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
    at --------Future.get--------(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$Stub.getLiveRegionServers(RegionServerStatusProtos.java:17191) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncClusterConnectionImpl.getLiveRegionServers(AsyncClusterConnectionImpl.java:139) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.BootstrapNodeManager.getFromMaster(BootstrapNodeManager.java:140) ~[classes/:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 10 more
2024-12-09T15:34:41,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:34:41,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T15:34:41,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-12-09T15:34:41,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:41,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:42,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:42,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:42,775 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta after 4010ms
2024-12-09T15:34:42,776 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/WALs/3651d8ead0f3,44921,1733758449658/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta to hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/oldWALs/3651d8ead0f3%2C44921%2C1733758449658.meta.1733758452451.meta
2024-12-09T15:34:42,779 DEBUG [RS:0;3651d8ead0f3:44921 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/oldWALs
2024-12-09T15:34:42,779 INFO [RS:0;3651d8ead0f3:44921 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C44921%2C1733758449658.meta:.meta(num 1733758478729)
2024-12-09T15:34:42,779 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,779 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,780 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,780 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,780 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741844_1028 (size=1162)
2024-12-09T15:34:42,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741844_1028 (size=1162)
2024-12-09T15:34:42,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:42,788 DEBUG [RS:0;3651d8ead0f3:44921 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/oldWALs
2024-12-09T15:34:42,788 INFO [RS:0;3651d8ead0f3:44921 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C44921%2C1733758449658:(num 1733758479098)
2024-12-09T15:34:42,788 DEBUG [RS:0;3651d8ead0f3:44921 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:34:42,788 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:34:42,788 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:34:42,788 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.ChoreService(370): Chore service for: regionserver/3651d8ead0f3:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T15:34:42,788 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:34:42,788 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T15:34:42,788 INFO [RS:0;3651d8ead0f3:44921 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44921
2024-12-09T15:34:42,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3651d8ead0f3,44921,1733758449658
2024-12-09T15:34:42,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T15:34:42,791 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:34:42,792 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3651d8ead0f3,44921,1733758449658]
2024-12-09T15:34:42,794 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3651d8ead0f3,44921,1733758449658 already deleted, retry=false
2024-12-09T15:34:42,794 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3651d8ead0f3,44921,1733758449658 expired; onlineServers=0
2024-12-09T15:34:42,794 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3651d8ead0f3,33343,1733758449248' *****
2024-12-09T15:34:42,794 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T15:34:42,794 INFO [M:0;3651d8ead0f3:33343 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:34:42,794 INFO [M:0;3651d8ead0f3:33343 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:34:42,794 DEBUG [M:0;3651d8ead0f3:33343 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T15:34:42,794 DEBUG [M:0;3651d8ead0f3:33343 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T15:34:42,794 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T15:34:42,794 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758451537 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758451537,5,FailOnTimeoutGroup]
2024-12-09T15:34:42,794 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758451534 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758451534,5,FailOnTimeoutGroup]
2024-12-09T15:34:42,794 INFO [M:0;3651d8ead0f3:33343 {}] hbase.ChoreService(370): Chore service for: master/3651d8ead0f3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T15:34:42,795 INFO [M:0;3651d8ead0f3:33343 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:34:42,795 DEBUG [M:0;3651d8ead0f3:33343 {}] master.HMaster(1795): Stopping service threads
2024-12-09T15:34:42,795 INFO [M:0;3651d8ead0f3:33343 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T15:34:42,795 INFO [M:0;3651d8ead0f3:33343 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:34:42,795 INFO [M:0;3651d8ead0f3:33343 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T15:34:42,795 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T15:34:42,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T15:34:42,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:42,798 DEBUG [M:0;3651d8ead0f3:33343 {}] zookeeper.ZKUtil(347): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-09T15:34:42,798 WARN [M:0;3651d8ead0f3:33343 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-09T15:34:42,799 INFO [M:0;3651d8ead0f3:33343 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/.lastflushedseqids
2024-12-09T15:34:42,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741846_1030 (size=120)
2024-12-09T15:34:42,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741846_1030 (size=120)
2024-12-09T15:34:42,810 INFO [M:0;3651d8ead0f3:33343 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T15:34:42,810 INFO [M:0;3651d8ead0f3:33343 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T15:34:42,810 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T15:34:42,811 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:34:42,811 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:34:42,811 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T15:34:42,811 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:34:42,811 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB
2024-12-09T15:34:42,814 ERROR [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData-prefix:3651d8ead0f3,33343,1733758449248 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:42,814 WARN [FSHLog-0-hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData-prefix:3651d8ead0f3,33343,1733758449248 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:42,815 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3651d8ead0f3%2C33343%2C1733758449248:(num 1733758450964) roll requested
2024-12-09T15:34:42,815 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C33343%2C1733758449248.1733758482815
2024-12-09T15:34:42,822 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,822 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,822 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,822 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,822 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:42,822 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758450964 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758482815
2024-12-09T15:34:42,824 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:42,824 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36237,DS-81a14574-0afe-4f97-a4ce-1a6ef7c2ed30,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T15:34:42,824 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758450964
2024-12-09T15:34:42,825 WARN [IPC Server handler 2 on default port 35925 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758450964 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015
2024-12-09T15:34:42,825 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758450964 after 1ms
2024-12-09T15:34:42,829 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34063:34063),(127.0.0.1/127.0.0.1:34623:34623)]
2024-12-09T15:34:42,829 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758450964 is not closed yet, will try archiving it next time
2024-12-09T15:34:42,853 DEBUG [M:0;3651d8ead0f3:33343 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/35248ef515294519929aef6990594c8a is 82, key is hbase:meta,,1/info:regioninfo/1733758452743/Put/seqid=0
2024-12-09T15:34:42,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741848_1033 (size=5672)
2024-12-09T15:34:42,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741848_1033 (size=5672)
2024-12-09T15:34:42,860 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/35248ef515294519929aef6990594c8a
2024-12-09T15:34:42,884 DEBUG [M:0;3651d8ead0f3:33343 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8f46bf09f8ee49b09817f0bd79e26c92 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733758453658/Put/seqid=0
2024-12-09T15:34:42,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741849_1034 (size=6119)
2024-12-09T15:34:42,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741849_1034 (size=6119)
2024-12-09T15:34:42,891 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8f46bf09f8ee49b09817f0bd79e26c92
2024-12-09T15:34:42,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:34:42,892 INFO [RS:0;3651d8ead0f3:44921 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:34:42,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44921-0x1012bd2046e0001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:34:42,892 INFO [RS:0;3651d8ead0f3:44921 {}] regionserver.HRegionServer(1031): Exiting; stopping=3651d8ead0f3,44921,1733758449658; zookeeper connection closed.
2024-12-09T15:34:42,893 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@e4bd5d6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@e4bd5d6
2024-12-09T15:34:42,893 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T15:34:42,916 DEBUG [M:0;3651d8ead0f3:33343 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dfb8a2d601b04a46a29a3dd04cc640f3 is 69, key is 3651d8ead0f3,44921,1733758449658/rs:state/1733758451665/Put/seqid=0
2024-12-09T15:34:42,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741850_1035 (size=5156)
2024-12-09T15:34:42,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741850_1035 (size=5156)
2024-12-09T15:34:42,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:43,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:43,326 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dfb8a2d601b04a46a29a3dd04cc640f3
2024-12-09T15:34:43,355 DEBUG [M:0;3651d8ead0f3:33343 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a819ed8fe344bb39e158adb26b4bf4f is 52, key is load_balancer_on/state:d/1733758453016/Put/seqid=0
2024-12-09T15:34:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741851_1036 (size=5056)
2024-12-09T15:34:43,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741851_1036 (size=5056)
2024-12-09T15:34:43,368 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a819ed8fe344bb39e158adb26b4bf4f
2024-12-09T15:34:43,375 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/35248ef515294519929aef6990594c8a as hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/35248ef515294519929aef6990594c8a
2024-12-09T15:34:43,382 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/35248ef515294519929aef6990594c8a, entries=8, sequenceid=56, filesize=5.5 K
2024-12-09T15:34:43,384 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8f46bf09f8ee49b09817f0bd79e26c92 as hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8f46bf09f8ee49b09817f0bd79e26c92
2024-12-09T15:34:43,390 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8f46bf09f8ee49b09817f0bd79e26c92, entries=6, sequenceid=56, filesize=6.0 K
2024-12-09T15:34:43,391 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dfb8a2d601b04a46a29a3dd04cc640f3 as hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dfb8a2d601b04a46a29a3dd04cc640f3
2024-12-09T15:34:43,397 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dfb8a2d601b04a46a29a3dd04cc640f3, entries=1, sequenceid=56, filesize=5.0 K
2024-12-09T15:34:43,398 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a819ed8fe344bb39e158adb26b4bf4f as hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a819ed8fe344bb39e158adb26b4bf4f
2024-12-09T15:34:43,409 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a819ed8fe344bb39e158adb26b4bf4f, entries=1, sequenceid=56, filesize=4.9 K
2024-12-09T15:34:43,410 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 599ms, sequenceid=56, compaction requested=false
2024-12-09T15:34:43,438 INFO [M:0;3651d8ead0f3:33343 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:34:43,439 DEBUG [M:0;3651d8ead0f3:33343 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758482810Disabling compacts and flushes for region at 1733758482810Disabling writes for close at 1733758482811 (+1 ms)Obtaining lock to block concurrent updates at 1733758482811Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733758482811Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733758482811Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733758482830 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733758482830Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733758482853 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733758482853Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733758482866 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733758482883 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733758482883Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733758482896 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733758482916 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733758482916Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733758483334 (+418 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733758483354 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733758483354Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@815df4e: reopening flushed file at 1733758483374 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62d8d1a6: reopening flushed file at 1733758483383 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@458cc016: reopening flushed file at 1733758483390 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47307eaa: reopening flushed file at 1733758483397 (+7 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 599ms, sequenceid=56, compaction requested=false at 1733758483410 (+13 ms)Writing region close event to WAL at 1733758483438 (+28 ms)Closed at 1733758483438
2024-12-09T15:34:43,442 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:43,443 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:43,446 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:43,447 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:43,448 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:34:43,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42259 is added to blk_1073741847_1031 (size=757)
2024-12-09T15:34:43,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741847_1031 (size=757)
2024-12-09T15:34:43,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45885 is added to blk_1073741830_1032 (size=27271)
2024-12-09T15:34:43,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:43,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:43,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:44,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:44,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:34:44,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:44,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:44,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:45,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:34:45,233 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T15:34:45,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:45,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:45,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 after 68138ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:45,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:45,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:46,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:46,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:46,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:46,802 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-09T15:34:46,826 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758450964 after 4002ms
2024-12-09T15:34:46,827 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/WALs/3651d8ead0f3,33343,1733758449248/3651d8ead0f3%2C33343%2C1733758449248.1733758450964 to hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/oldWALs/3651d8ead0f3%2C33343%2C1733758449248.1733758450964
2024-12-09T15:34:46,831 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/MasterData/oldWALs/3651d8ead0f3%2C33343%2C1733758449248.1733758450964 to hdfs://localhost:35925/user/jenkins/test-data/c6edb7c9-4c9c-301e-8d4a-2e2ed2f2ddd0/oldWALs/3651d8ead0f3%2C33343%2C1733758449248.1733758450964$masterlocalwal$
2024-12-09T15:34:46,831 INFO [M:0;3651d8ead0f3:33343 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T15:34:46,832 INFO [M:0;3651d8ead0f3:33343 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33343
2024-12-09T15:34:46,832 INFO [M:0;3651d8ead0f3:33343 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:34:46,832 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T15:34:46,934 INFO [M:0;3651d8ead0f3:33343 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:34:46,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:34:46,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33343-0x1012bd2046e0000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:34:46,955 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41b9791e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:34:46,956 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1240a3cc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:34:46,956 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:34:46,956 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f3e5a16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:34:46,956 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c39138a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,STOPPED}
2024-12-09T15:34:46,967 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:34:46,967 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:34:46,968 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:34:46,968 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-871040900-172.17.0.3-1733758443620 (Datanode Uuid c9b088f4-8ae4-4179-83dc-8e29ab739952) service to localhost/127.0.0.1:35925
2024-12-09T15:34:46,969 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data3/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:46,969 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:34:46,969 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data4/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:46,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6db938{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:34:46,980 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cf288ef{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:34:46,980 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:34:46,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ef50a45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:34:46,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19016e01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,STOPPED}
2024-12-09T15:34:46,983 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:34:46,983 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:34:46,983 WARN [BP-871040900-172.17.0.3-1733758443620 heartbeating to localhost/127.0.0.1:35925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-871040900-172.17.0.3-1733758443620 (Datanode Uuid fdd28ed2-4e85-4af5-82a0-58145929e994) service to localhost/127.0.0.1:35925
2024-12-09T15:34:46,983 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:34:46,983 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data1/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:46,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/cluster_11b09c6e-a69a-2589-3b8e-7b601f49e3e3/data/data2/current/BP-871040900-172.17.0.3-1733758443620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:34:46,984 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:34:46,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49ef22be{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T15:34:46,990 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b230242{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:34:46,990 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:34:46,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7096145a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:34:46,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25a29a07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir/,STOPPED}
2024-12-09T15:34:46,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:47,003 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T15:34:47,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T15:34:47,042 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 153)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35925 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-30-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:35925
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Command processor
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Potentially hanging thread: nioEventLoopGroup-32-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35925 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:35925
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35925
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35925
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35925
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35925 from jenkins.hfs.4
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
 - Thread LEAK? -, OpenFileDescriptor=455 (was 429) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=970 (was 749) - SystemLoadAverage LEAK?
-, ProcessCount=11 (was 11), AvailableMemoryMB=5944 (was 5986) 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=970, ProcessCount=11, AvailableMemoryMB=5944 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.log.dir so I do NOT create it in target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8e6ff928-5761-c878-b5b5-5f58f82d8acb/hadoop.tmp.dir so I do NOT create it in target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f, deleteOnExit=true 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/test.cache.data in system properties and HBase conf 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.log.dir in system properties and HBase conf 2024-12-09T15:34:47,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T15:34:47,053 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/nfs.dump.dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/java.io.tmpdir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T15:34:47,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T15:34:47,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T15:34:47,067 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T15:34:47,144 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:34:47,148 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:34:47,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:34:47,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:34:47,149 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T15:34:47,150 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:34:47,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1592e8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:34:47,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ac52070{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:34:47,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:47,268 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1367dc96{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/java.io.tmpdir/jetty-localhost-43021-hadoop-hdfs-3_4_1-tests_jar-_-any-9767929068300018885/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T15:34:47,268 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5206174e{HTTP/1.1, (http/1.1)}{localhost:43021} 2024-12-09T15:34:47,268 INFO [Time-limited test {}] server.Server(415): Started @212920ms 2024-12-09T15:34:47,284 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T15:34:47,348 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:34:47,350 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:34:47,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:34:47,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:34:47,351 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T15:34:47,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e9b78ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:34:47,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@423ae426{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:34:47,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77f859f0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/java.io.tmpdir/jetty-localhost-40601-hadoop-hdfs-3_4_1-tests_jar-_-any-13157691714667626691/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:34:47,502 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2037cad5{HTTP/1.1, (http/1.1)}{localhost:40601} 2024-12-09T15:34:47,502 INFO [Time-limited test {}] server.Server(415): Started @213153ms 2024-12-09T15:34:47,504 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T15:34:47,573 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:34:47,581 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:34:47,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:34:47,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:34:47,591 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T15:34:47,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8ac3c66{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:34:47,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f8e2aa3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:34:47,683 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/data/data1/current/BP-1631766011-172.17.0.3-1733758487085/current, will proceed with Du for space computation calculation, 2024-12-09T15:34:47,691 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/data/data2/current/BP-1631766011-172.17.0.3-1733758487085/current, will proceed with Du for space computation calculation, 2024-12-09T15:34:47,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:47,747 WARN [Thread-1635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T15:34:47,759 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26d8a30eb8083587 with lease ID 0xbf22ad01fd812cbe: Processing first storage report for DS-c8577f9f-65f5-4fb2-9a98-e5d725f7ce63 from datanode DatanodeRegistration(127.0.0.1:41417, datanodeUuid=64283df8-e8bc-4cd1-892f-220e1e4cfe81, infoPort=35795, infoSecurePort=0, ipcPort=36751, storageInfo=lv=-57;cid=testClusterID;nsid=889268429;c=1733758487085) 2024-12-09T15:34:47,759 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26d8a30eb8083587 with lease ID 0xbf22ad01fd812cbe: from storage DS-c8577f9f-65f5-4fb2-9a98-e5d725f7ce63 node DatanodeRegistration(127.0.0.1:41417, datanodeUuid=64283df8-e8bc-4cd1-892f-220e1e4cfe81, infoPort=35795, infoSecurePort=0, ipcPort=36751, storageInfo=lv=-57;cid=testClusterID;nsid=889268429;c=1733758487085), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:47,759 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26d8a30eb8083587 with lease ID 0xbf22ad01fd812cbe: Processing first storage report for DS-98365bfe-1a6e-47f1-935d-f90a5e9e6ecc from datanode DatanodeRegistration(127.0.0.1:41417, datanodeUuid=64283df8-e8bc-4cd1-892f-220e1e4cfe81, infoPort=35795, infoSecurePort=0, ipcPort=36751, storageInfo=lv=-57;cid=testClusterID;nsid=889268429;c=1733758487085) 2024-12-09T15:34:47,759 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26d8a30eb8083587 with lease ID 0xbf22ad01fd812cbe: from storage DS-98365bfe-1a6e-47f1-935d-f90a5e9e6ecc node DatanodeRegistration(127.0.0.1:41417, datanodeUuid=64283df8-e8bc-4cd1-892f-220e1e4cfe81, infoPort=35795, infoSecurePort=0, ipcPort=36751, storageInfo=lv=-57;cid=testClusterID;nsid=889268429;c=1733758487085), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:47,773 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@bff2115{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/java.io.tmpdir/jetty-localhost-41325-hadoop-hdfs-3_4_1-tests_jar-_-any-1523914338424702869/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:34:47,774 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@3a2777cc{HTTP/1.1, (http/1.1)}{localhost:41325} 2024-12-09T15:34:47,774 INFO [Time-limited test {}] server.Server(415): Started @213425ms 2024-12-09T15:34:47,776 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T15:34:47,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:34:47,952 WARN [Thread-1682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/data/data3/current/BP-1631766011-172.17.0.3-1733758487085/current, will proceed with Du for space computation calculation, 2024-12-09T15:34:47,957 WARN [Thread-1683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/data/data4/current/BP-1631766011-172.17.0.3-1733758487085/current, will proceed with Du for space computation calculation, 2024-12-09T15:34:48,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:48,023 WARN [Thread-1671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T15:34:48,038 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45460a1bb1f68f4e with lease ID 0xbf22ad01fd812cbf: Processing first storage report for DS-2d6e33f9-50bd-4e9b-bc97-d74b95e64bd9 from datanode DatanodeRegistration(127.0.0.1:33877, datanodeUuid=e982f75d-d719-4dd0-b5ae-d4b40cd692ca, infoPort=38083, infoSecurePort=0, ipcPort=45297, storageInfo=lv=-57;cid=testClusterID;nsid=889268429;c=1733758487085) 2024-12-09T15:34:48,038 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45460a1bb1f68f4e with lease ID 0xbf22ad01fd812cbf: from storage DS-2d6e33f9-50bd-4e9b-bc97-d74b95e64bd9 node DatanodeRegistration(127.0.0.1:33877, datanodeUuid=e982f75d-d719-4dd0-b5ae-d4b40cd692ca, infoPort=38083, infoSecurePort=0, ipcPort=45297, storageInfo=lv=-57;cid=testClusterID;nsid=889268429;c=1733758487085), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:48,038 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45460a1bb1f68f4e with lease ID 0xbf22ad01fd812cbf: Processing first storage report for DS-3354ddfa-a93f-4fda-b1d9-cbc8f47f6546 from datanode DatanodeRegistration(127.0.0.1:33877, datanodeUuid=e982f75d-d719-4dd0-b5ae-d4b40cd692ca, infoPort=38083, infoSecurePort=0, ipcPort=45297, storageInfo=lv=-57;cid=testClusterID;nsid=889268429;c=1733758487085) 2024-12-09T15:34:48,038 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45460a1bb1f68f4e with lease ID 0xbf22ad01fd812cbf: from storage DS-3354ddfa-a93f-4fda-b1d9-cbc8f47f6546 node DatanodeRegistration(127.0.0.1:33877, datanodeUuid=e982f75d-d719-4dd0-b5ae-d4b40cd692ca, infoPort=38083, infoSecurePort=0, ipcPort=45297, storageInfo=lv=-57;cid=testClusterID;nsid=889268429;c=1733758487085), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:34:48,127 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b 2024-12-09T15:34:48,137 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/zookeeper_0, clientPort=52538, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T15:34:48,139 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52538 2024-12-09T15:34:48,139 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:34:48,141 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:34:48,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741825_1001 (size=7) 2024-12-09T15:34:48,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741825_1001 (size=7) 2024-12-09T15:34:48,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:34:48,559 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be with version=8 2024-12-09T15:34:48,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/hbase-staging 2024-12-09T15:34:48,562 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3651d8ead0f3:0 server-side Connection retries=45 2024-12-09T15:34:48,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:34:48,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T15:34:48,562 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T15:34:48,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:34:48,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T15:34:48,562 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T15:34:48,563 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T15:34:48,564 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43765 2024-12-09T15:34:48,566 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43765 connecting to ZooKeeper ensemble=127.0.0.1:52538 2024-12-09T15:34:48,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:437650x0, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T15:34:48,614 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43765-0x1012bd29e610000 connected 2024-12-09T15:34:48,699 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:34:48,701 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:34:48,705 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:34:48,705 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be, 
hbase.cluster.distributed=false 2024-12-09T15:34:48,709 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T15:34:48,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43765 2024-12-09T15:34:48,730 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43765 2024-12-09T15:34:48,738 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43765 2024-12-09T15:34:48,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:34:48,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43765 2024-12-09T15:34:48,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43765 2024-12-09T15:34:48,770 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3651d8ead0f3:0 server-side Connection retries=45 2024-12-09T15:34:48,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:34:48,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T15:34:48,771 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T15:34:48,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:34:48,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T15:34:48,771 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T15:34:48,771 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T15:34:48,775 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40997 2024-12-09T15:34:48,777 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40997 connecting to ZooKeeper ensemble=127.0.0.1:52538 2024-12-09T15:34:48,777 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:34:48,780 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:34:48,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:409970x0, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T15:34:48,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:48,788 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40997-0x1012bd29e610001 connected 2024-12-09T15:34:48,788 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:34:48,788 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T15:34:48,789 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T15:34:48,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T15:34:48,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T15:34:48,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40997 2024-12-09T15:34:48,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40997 2024-12-09T15:34:48,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40997 2024-12-09T15:34:48,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40997 2024-12-09T15:34:48,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40997 2024-12-09T15:34:48,815 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3651d8ead0f3:43765 2024-12-09T15:34:48,816 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3651d8ead0f3,43765,1733758488561 2024-12-09T15:34:48,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:34:48,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:34:48,822 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3651d8ead0f3,43765,1733758488561 2024-12-09T15:34:48,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T15:34:48,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:34:48,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:34:48,830 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T15:34:48,831 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3651d8ead0f3,43765,1733758488561 from backup master directory 2024-12-09T15:34:48,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3651d8ead0f3,43765,1733758488561 2024-12-09T15:34:48,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:34:48,838 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
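
The ZKWatcher events in the surrounding entries (watches set on /hbase/master and /hbase/backup-masters, then NodeCreated/NodeDeleted firing) are the znode choreography behind active-master registration. A minimal sketch of that one-shot watch pattern with the plain ZooKeeper client; the quorum address 127.0.0.1:52538 and the /hbase/master path are taken from this run, but the class below is purely illustrative and is not HBase's actual ZKWatcher implementation:

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: mimics the watch pattern visible in the ZKWatcher log
// lines above (quorum address and znode path taken from this test run).
public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:52538", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown(); // the "SyncConnected" event in the log
      }
      // NodeCreated / NodeDeleted on /hbase/master arrive here, like the
      // "Received ZooKeeper Event, type=NodeCreated ..." entries above.
      System.out.println("event: " + event.getType() + " on " + event.getPath());
    });
    connected.await();
    // exists() registers a one-shot watch even when the znode is absent --
    // the "Set watcher on znode that does not yet exist" case in the log.
    zk.exists("/hbase/master", true);
    Thread.sleep(60_000); // toy example: just wait for events, then exit
    zk.close();
  }
}
```

Because exists() watches also work on absent znodes, a standby master can learn about /hbase/master being created or deleted without polling, which is what turns the NodeDeleted event above into a new election round.
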
2024-12-09T15:34:48,838 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3651d8ead0f3,43765,1733758488561
2024-12-09T15:34:48,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:34:48,855 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/hbase.id] with ID: 0b841f06-34eb-48a1-b7fe-5cf8d3bf4018
2024-12-09T15:34:48,856 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/.tmp/hbase.id
2024-12-09T15:34:48,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741826_1002 (size=42)
2024-12-09T15:34:48,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741826_1002 (size=42)
2024-12-09T15:34:48,870 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/.tmp/hbase.id]:[hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/hbase.id]
2024-12-09T15:34:48,894 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:48,894 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-09T15:34:48,896 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
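The FSUtils(620)/(625)/(634) sequence above records a write-to-temporary-then-rename pattern: the cluster ID is written under .tmp and then moved to hbase.id, so no reader ever observes a half-written file. A sketch of the same idea with the plain Hadoop FileSystem API; the paths and the default filesystem here are illustrative, not the test's:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of write-temp-then-rename: rename is atomic within an HDFS
    // namespace, so readers of dst never see a partially written file.
    public class ClusterIdFileSketch {
        public static void main(String[] args) throws IOException {
            FileSystem fs = FileSystem.get(new Configuration());
            Path tmp = new Path("/hbase/.tmp/hbase.id"); // temporary location
            Path dst = new Path("/hbase/hbase.id");      // final location
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.writeUTF("0b841f06-34eb-48a1-b7fe-5cf8d3bf4018"); // the ID this run generated
            }
            if (!fs.rename(tmp, dst)) {
                throw new IOException("rename failed: " + tmp + " -> " + dst);
            }
        }
    }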
2024-12-09T15:34:48,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:48,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:48,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741827_1003 (size=196)
2024-12-09T15:34:48,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741827_1003 (size=196)
2024-12-09T15:34:48,923 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T15:34:48,924 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T15:34:48,927 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T15:34:48,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741828_1004 (size=1189)
2024-12-09T15:34:48,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741828_1004 (size=1189)
2024-12-09T15:34:48,943 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store
2024-12-09T15:34:48,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741829_1005 (size=34)
2024-12-09T15:34:48,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741829_1005 (size=34)
2024-12-09T15:34:48,961 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:34:48,962 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T15:34:48,962 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:34:48,962 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:34:48,962 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T15:34:48,962 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:34:48,962 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:34:48,962 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758488961Disabling compacts and flushes for region at 1733758488961Disabling writes for close at 1733758488962 (+1 ms)Writing region close event to WAL at 1733758488962Closed at 1733758488962
2024-12-09T15:34:48,963 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/.initializing
2024-12-09T15:34:48,963 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/WALs/3651d8ead0f3,43765,1733758488561
2024-12-09T15:34:48,966 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C43765%2C1733758488561, suffix=, logDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/WALs/3651d8ead0f3,43765,1733758488561, archiveDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/oldWALs, maxLogs=10
2024-12-09T15:34:48,967 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C43765%2C1733758488561.1733758488967
2024-12-09T15:34:48,974 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/WALs/3651d8ead0f3,43765,1733758488561/3651d8ead0f3%2C43765%2C1733758488561.1733758488967
2024-12-09T15:34:48,978 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38083:38083),(127.0.0.1/127.0.0.1:35795:35795)]
2024-12-09T15:34:48,979 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-09T15:34:48,979 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:34:48,980 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,980 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,983 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,984 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-09T15:34:48,984 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:48,985 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:48,985 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-09T15:34:48,987 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:48,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:34:48,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,988 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-09T15:34:48,988 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:48,989 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:34:48,989 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,990 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-09T15:34:48,990 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:48,991 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:34:48,991 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,992 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,992 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,994 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,994 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:48,996 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-09T15:34:48,997 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:34:49,000 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T15:34:49,001 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=850901, jitterRate=0.08197741210460663}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-09T15:34:49,002 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733758488980Initializing all the Stores at 1733758488982 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758488982Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758488982Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758488982Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758488982Cleaning up temporary data from old regions at 1733758488994 (+12 ms)Region opened successfully at 1733758489001 (+7 ms)
2024-12-09T15:34:49,002 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-09T15:34:49,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:49,006 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@515edd60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0
2024-12-09T15:34:49,007 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-12-09T15:34:49,007 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-09T15:34:49,007 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-09T15:34:49,008 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
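About the recurring "Failed invocation ... java.lang.reflect.InvocationTargetException: null" warnings (one appears just above): RecoverLeaseFSUtils invokes isFileClosed reflectively, and java.lang.reflect wraps whatever the target method throws in an InvocationTargetException whose own message is null; the real failure is the cause, here the IOException("Filesystem closed") raised once the DFSClient has shut down. A self-contained sketch of that wrapping behavior, using plain reflection rather than the HBase code:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    // Demonstrates why a reflective call surfaces as
    // "InvocationTargetException: null" with the real error as the cause.
    public class ReflectiveWrapDemo {
        // Stand-in for DistributedFileSystem.isFileClosed: throws once the
        // client is closed, mirroring DFSClient.checkOpen in the trace above.
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }

        public static void main(String[] args) throws Exception {
            Method m = ReflectiveWrapDemo.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new ReflectiveWrapDemo(), "/some/wal");
            } catch (InvocationTargetException e) {
                System.out.println("wrapper message: " + e.getMessage()); // prints "null"
                System.out.println("actual cause:    " + e.getCause());  // the IOException
            }
        }
    }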
2024-12-09T15:34:49,008 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-12-09T15:34:49,009 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-12-09T15:34:49,009 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-09T15:34:49,015 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-09T15:34:49,016 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-09T15:34:49,018 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-12-09T15:34:49,018 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-09T15:34:49,019 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-09T15:34:49,020 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-12-09T15:34:49,021 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-09T15:34:49,022 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-09T15:34:49,025 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-12-09T15:34:49,026 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-09T15:34:49,027 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-12-09T15:34:49,030 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-09T15:34:49,031 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-09T15:34:49,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-09T15:34:49,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-09T15:34:49,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:49,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:49,034 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3651d8ead0f3,43765,1733758488561, sessionid=0x1012bd29e610000, setting cluster-up flag (Was=false)
2024-12-09T15:34:49,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:49,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:49,046 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-09T15:34:49,047 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,43765,1733758488561
2024-12-09T15:34:49,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:49,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:49,056 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-09T15:34:49,057 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,43765,1733758488561
2024-12-09T15:34:49,059 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-12-09T15:34:49,060 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-12-09T15:34:49,061 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-12-09T15:34:49,061 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-09T15:34:49,061 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3651d8ead0f3,43765,1733758488561 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-09T15:34:49,063 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5
2024-12-09T15:34:49,063 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5
2024-12-09T15:34:49,063 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5
2024-12-09T15:34:49,063 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5
2024-12-09T15:34:49,063 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3651d8ead0f3:0, corePoolSize=10, maxPoolSize=10
2024-12-09T15:34:49,063 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,063 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2
2024-12-09T15:34:49,063 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,068 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T15:34:49,068 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-12-09T15:34:49,069 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:49,070 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-09T15:34:49,080 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733758519080
2024-12-09T15:34:49,081 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-09T15:34:49,081 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-09T15:34:49,081 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-09T15:34:49,081 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-09T15:34:49,081 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-09T15:34:49,081 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-09T15:34:49,081 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,082 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-09T15:34:49,082 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-09T15:34:49,082 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-09T15:34:49,082 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-09T15:34:49,082 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-09T15:34:49,086 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758489082,5,FailOnTimeoutGroup]
2024-12-09T15:34:49,090 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758489086,5,FailOnTimeoutGroup]
2024-12-09T15:34:49,090 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,091 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-09T15:34:49,091 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,091 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,100 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(746): ClusterId : 0b841f06-34eb-48a1-b7fe-5cf8d3bf4018
2024-12-09T15:34:49,100 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-09T15:34:49,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741831_1007 (size=1321)
2024-12-09T15:34:49,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741831_1007 (size=1321)
2024-12-09T15:34:49,104 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-09T15:34:49,104 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-09T15:34:49,107 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-09T15:34:49,107 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-12-09T15:34:49,108 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be
2024-12-09T15:34:49,108 DEBUG [RS:0;3651d8ead0f3:40997 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73378a47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0
2024-12-09T15:34:49,129 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3651d8ead0f3:40997
2024-12-09T15:34:49,130 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-09T15:34:49,130 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-09T15:34:49,130 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-09T15:34:49,131 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(2659): reportForDuty to master=3651d8ead0f3,43765,1733758488561 with port=40997, startcode=1733758488770
2024-12-09T15:34:49,131 DEBUG [RS:0;3651d8ead0f3:40997 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-09T15:34:49,134 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41239, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-12-09T15:34:49,134 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43765 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:49,135 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43765 {}] master.ServerManager(517): Registering regionserver=3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:49,137 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be
2024-12-09T15:34:49,137 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32865
2024-12-09T15:34:49,137 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-09T15:34:49,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741832_1008 (size=32)
2024-12-09T15:34:49,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741832_1008 (size=32)
2024-12-09T15:34:49,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T15:34:49,140 DEBUG [RS:0;3651d8ead0f3:40997 {}] zookeeper.ZKUtil(111): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:49,141 WARN [RS:0;3651d8ead0f3:40997 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-09T15:34:49,141 INFO [RS:0;3651d8ead0f3:40997 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T15:34:49,141 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:49,141 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3651d8ead0f3,40997,1733758488770]
2024-12-09T15:34:49,145 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-09T15:34:49,147 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-09T15:34:49,148 INFO [RS:0;3651d8ead0f3:40997 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-09T15:34:49,148 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,148 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-09T15:34:49,149 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-09T15:34:49,149 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,149 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,149 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,149 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,149 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,149 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,149 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2
2024-12-09T15:34:49,149 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,150 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,150 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,150 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,150 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,150 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1
2024-12-09T15:34:49,150 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3
2024-12-09T15:34:49,150 DEBUG [RS:0;3651d8ead0f3:40997 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3
2024-12-09T15:34:49,152 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,152 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,152 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,152 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,152 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,152 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,40997,1733758488770-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T15:34:49,173 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-09T15:34:49,173 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,40997,1733758488770-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,173 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,173 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.Replication(171): 3651d8ead0f3,40997,1733758488770 started
2024-12-09T15:34:49,196 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,196 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1482): Serving as 3651d8ead0f3,40997,1733758488770, RpcServer on 3651d8ead0f3/172.17.0.3:40997, sessionid=0x1012bd29e610001
2024-12-09T15:34:49,196 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-09T15:34:49,196 DEBUG [RS:0;3651d8ead0f3:40997 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:49,196 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,40997,1733758488770'
2024-12-09T15:34:49,196 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-09T15:34:49,198 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-09T15:34:49,203 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-09T15:34:49,203 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-09T15:34:49,203 DEBUG [RS:0;3651d8ead0f3:40997 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:49,203 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,40997,1733758488770'
2024-12-09T15:34:49,203 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-09T15:34:49,203 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-09T15:34:49,205 DEBUG [RS:0;3651d8ead0f3:40997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-09T15:34:49,205 INFO [RS:0;3651d8ead0f3:40997 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-09T15:34:49,205 INFO [RS:0;3651d8ead0f3:40997 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-09T15:34:49,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:49,307 INFO [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C40997%2C1733758488770, suffix=, logDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770, archiveDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/oldWALs, maxLogs=32 2024-12-09T15:34:49,308 INFO [RS:0;3651d8ead0f3:40997 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C40997%2C1733758488770.1733758489308 2024-12-09T15:34:49,345 INFO [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758489308 2024-12-09T15:34:49,349 DEBUG [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38083:38083),(127.0.0.1/127.0.0.1:35795:35795)] 2024-12-09T15:34:49,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:34:49,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:34:49,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
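Aside: the "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" shape above is characteristic of java.lang.reflect.Method.invoke: the real IOException thrown by the reflectively-called isFileClosed is wrapped in an InvocationTargetException whose own message is null, which is exactly what the log prints. A minimal, self-contained sketch of that wrapping (toy target class, not HBase's RecoverLeaseFSUtils):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public final class WrapSketch {
        public static class Fs {
            public boolean isFileClosed(String path) throws IOException {
                // Stands in for a DFSClient whose filesystem was already closed.
                throw new IOException("Filesystem closed");
            }
        }
        public static void main(String[] args) throws Exception {
            Method m = Fs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new Fs(), "/some/wal");
            } catch (InvocationTargetException e) {
                // The wrapper's message is null; the real failure is the cause,
                // matching the two-part trace in the log above.
                System.out.println("wrapper message: " + e.getMessage());
                System.out.println("cause: " + e.getCause());
            }
        }
    }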
2024-12-09T15:34:49,307 INFO [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C40997%2C1733758488770, suffix=, logDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770, archiveDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/oldWALs, maxLogs=32
2024-12-09T15:34:49,308 INFO [RS:0;3651d8ead0f3:40997 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C40997%2C1733758488770.1733758489308
2024-12-09T15:34:49,345 INFO [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758489308
2024-12-09T15:34:49,349 DEBUG [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38083:38083),(127.0.0.1/127.0.0.1:35795:35795)]
2024-12-09T15:34:49,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:34:49,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T15:34:49,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T15:34:49,543 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:49,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:49,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T15:34:49,545 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-09T15:34:49,545 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:49,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:49,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T15:34:49,548 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T15:34:49,548 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:49,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:49,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T15:34:49,553 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T15:34:49,553 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:49,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:49,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-09T15:34:49,555 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740
2024-12-09T15:34:49,555 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740
2024-12-09T15:34:49,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-09T15:34:49,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-09T15:34:49,558 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
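Aside: the 16.0 M in the FlushLargeStoresPolicy line is the per-family flush lower bound: the region's memstore flush heap size divided by its number of column families (four here: info, ns, rep_barrier, table). The 64 MB numerator below is inferred from the log, not stated in it; a one-line check of the arithmetic:

    public final class FlushBoundSketch {
        public static void main(String[] args) {
            long memStoreFlushHeapSize = 64L * 1024 * 1024; // inferred: 16.0 M x 4 families
            int families = 4;                               // info, ns, rep_barrier, table
            long lowerBound = memStoreFlushHeapSize / families;
            System.out.println(lowerBound); // 16777216, matching flushSizeLowerBound later in the log
        }
    }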
2024-12-09T15:34:49,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-09T15:34:49,563 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T15:34:49,563 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797211, jitterRate=0.013706564903259277}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-09T15:34:49,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733758489539Initializing all the Stores at 1733758489540 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758489540Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758489540Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758489540Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758489541 (+1 ms)Cleaning up temporary data from old regions at 1733758489557 (+16 ms)Region opened successfully at 1733758489564 (+7 ms)
2024-12-09T15:34:49,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T15:34:49,564 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T15:34:49,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T15:34:49,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T15:34:49,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T15:34:49,565 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T15:34:49,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758489564Disabling compacts and flushes for region at 1733758489564Disabling writes for close at 1733758489564Writing region close event to WAL at 1733758489565 (+1 ms)Closed at 1733758489565
2024-12-09T15:34:49,567 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T15:34:49,567 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-12-09T15:34:49,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-09T15:34:49,569 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T15:34:49,571 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-09T15:34:49,721 DEBUG [3651d8ead0f3:43765 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-09T15:34:49,722 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:49,724 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,40997,1733758488770, state=OPENING
2024-12-09T15:34:49,725 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-09T15:34:49,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:49,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:34:49,727 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T15:34:49,727 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:34:49,727 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:34:49,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,40997,1733758488770}]
2024-12-09T15:34:49,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:49,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:49,881 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-09T15:34:49,884 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51343, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-09T15:34:49,889 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-09T15:34:49,889 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T15:34:49,892 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C40997%2C1733758488770.meta, suffix=.meta, logDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770, archiveDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/oldWALs, maxLogs=32
2024-12-09T15:34:49,892 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C40997%2C1733758488770.meta.1733758489892.meta
2024-12-09T15:34:49,904 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.meta.1733758489892.meta
2024-12-09T15:34:49,911 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38083:38083),(127.0.0.1/127.0.0.1:35795:35795)]
2024-12-09T15:34:49,914 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-09T15:34:49,914 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-09T15:34:49,914 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-09T15:34:49,914 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-09T15:34:49,914 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-09T15:34:49,914 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:34:49,915 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-12-09T15:34:49,915 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-12-09T15:34:49,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T15:34:49,917 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T15:34:49,917 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:49,918 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:49,918 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T15:34:49,919 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-09T15:34:49,919 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:49,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:49,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T15:34:49,920 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T15:34:49,920 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:49,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:49,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T15:34:49,921 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T15:34:49,921 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:49,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:34:49,922 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-09T15:34:49,923 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740
2024-12-09T15:34:49,924 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740
2024-12-09T15:34:49,926 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-09T15:34:49,926 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-09T15:34:49,926 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-09T15:34:49,928 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-09T15:34:49,929 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702405, jitterRate=-0.10684658586978912}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-09T15:34:49,929 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-12-09T15:34:49,930 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733758489915Writing region info on filesystem at 1733758489915Initializing all the Stores at 1733758489916 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758489916Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758489916Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758489916Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758489916Cleaning up temporary data from old regions at 1733758489926 (+10 ms)Running coprocessor post-open hooks at 1733758489929 (+3 ms)Region opened successfully at 1733758489929
2024-12-09T15:34:49,931 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733758489881
2024-12-09T15:34:49,938 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:49,940 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-09T15:34:49,940 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-12-09T15:34:49,941 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,40997,1733758488770, state=OPEN
2024-12-09T15:34:49,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-09T15:34:49,949 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:49,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-09T15:34:49,950 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:34:49,950 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:34:49,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-12-09T15:34:49,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,40997,1733758488770 in 222 msec
2024-12-09T15:34:49,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-12-09T15:34:49,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 387 msec
2024-12-09T15:34:49,959 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T15:34:49,959 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-12-09T15:34:49,961 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T15:34:49,961 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,40997,1733758488770, seqNum=-1]
2024-12-09T15:34:49,961 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T15:34:49,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33033, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T15:34:49,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 926 msec
2024-12-09T15:34:49,989 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733758489989, completionTime=-1
2024-12-09T15:34:49,990 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-09T15:34:49,990 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-12-09T15:34:49,998 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-12-09T15:34:49,998 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733758549998
2024-12-09T15:34:49,998 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733758609998
2024-12-09T15:34:49,998 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 8 msec
2024-12-09T15:34:49,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43765,1733758488561-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43765,1733758488561-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43765,1733758488561-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3651d8ead0f3:43765, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:49,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:50,001 DEBUG [master/3651d8ead0f3:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-12-09T15:34:50,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:50,006 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:50,014 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.176sec
2024-12-09T15:34:50,015 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-09T15:34:50,015 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-09T15:34:50,015 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-09T15:34:50,015 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-09T15:34:50,015 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-09T15:34:50,015 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43765,1733758488561-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T15:34:50,015 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43765,1733758488561-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-09T15:34:50,030 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-12-09T15:34:50,030 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-09T15:34:50,030 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,43765,1733758488561-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:34:50,101 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a72ae8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:34:50,102 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3651d8ead0f3,43765,-1 for getting cluster id
2024-12-09T15:34:50,102 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-09T15:34:50,107 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0b841f06-34eb-48a1-b7fe-5cf8d3bf4018'
2024-12-09T15:34:50,108 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-09T15:34:50,108 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0b841f06-34eb-48a1-b7fe-5cf8d3bf4018"
2024-12-09T15:34:50,108 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54b9574a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:34:50,108 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3651d8ead0f3,43765,-1]
2024-12-09T15:34:50,109 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-09T15:34:50,109 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client
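Aside: the ClusterIdFetcher exchange above is the first thing a client does on connect: ask the connection registry for the cluster id, then build stubs against the master. A rough equivalent with the public client API (a sketch only; it assumes a reachable quorum at the address seen in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class ClusterIdSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1:52538"); // quorum from the log above
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Surfaces the same cluster id the registry handed back above.
                System.out.println(admin.getClusterMetrics().getClusterId());
            }
        }
    }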
2024-12-09T15:34:50,110 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51164, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-09T15:34:50,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2494db76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:34:50,113 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T15:34:50,114 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,40997,1733758488770, seqNum=-1]
2024-12-09T15:34:50,115 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T15:34:50,116 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54588, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T15:34:50,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3651d8ead0f3,43765,1733758488561
2024-12-09T15:34:50,118 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:34:50,128 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-12-09T15:34:50,129 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-12-09T15:34:50,130 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 3651d8ead0f3,43765,1733758488561
2024-12-09T15:34:50,130 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@203e8736
2024-12-09T15:34:50,131 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-09T15:34:50,132 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51178, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-09T15:34:50,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-09T15:34:50,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
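Aside: the two TableDescriptorChecker warnings fire because this test deliberately sets MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, far below production values, to force frequent splits and flushes. A sketch of how such a descriptor is built with the public client API (table and family names from the log; this is an illustration, not the test's actual setup code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class SmallTableSketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setMaxFileSize(786432L)     // trips the MAX_FILESIZE warning above
                .setMemStoreFlushSize(8192L) // trips the MEMSTORE_FLUSHSIZE warning above
                .build();
        }
    }

Passing the built descriptor to Admin.createTable would produce exactly the create request logged next.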
2024-12-09T15:34:50,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T15:34:50,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:34:50,149 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-09T15:34:50,149 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:50,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4
2024-12-09T15:34:50,151 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-09T15:34:50,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T15:34:50,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741835_1011 (size=405)
2024-12-09T15:34:50,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741835_1011 (size=405)
2024-12-09T15:34:50,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:50,590 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cac38d334b41e8045bdda283dc300551, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be
2024-12-09T15:34:50,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741836_1012 (size=88)
2024-12-09T15:34:50,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741836_1012 (size=88)
2024-12-09T15:34:50,602 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:34:50,602 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing cac38d334b41e8045bdda283dc300551, disabling compactions & flushes
2024-12-09T15:34:50,602 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:34:50,602 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:34:50,602 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551. after waiting 0 ms 2024-12-09T15:34:50,602 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551. 2024-12-09T15:34:50,602 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551. 2024-12-09T15:34:50,602 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for cac38d334b41e8045bdda283dc300551: Waiting for close lock at 1733758490602Disabling compacts and flushes for region at 1733758490602Disabling writes for close at 1733758490602Writing region close event to WAL at 1733758490602Closed at 1733758490602 2024-12-09T15:34:50,606 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T15:34:50,607 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733758490606"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733758490606"}]},"ts":"1733758490606"} 2024-12-09T15:34:50,611 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
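[Editor's note] The create request logged above is what reaches HMaster when a client creates the test table. For reference, a minimal client-side sketch that would produce an equivalent descriptor, assuming a standard HBase 3.x client on the classpath and a reachable cluster configuration; the class name and the `conf` setup are illustrative, not taken from the test:

// Sketch: issue the same CREATE TABLE the master logs record above,
// using the public HBase client API (TableDescriptorBuilder etc.).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)    // VERSIONS => '1' in the logged descriptor
              .setBlocksize(65536)  // BLOCKSIZE => '65536'; other attributes are defaults
              .build())
          .build();
      // The master stores this as a CreateTableProcedure (pid=4 in the log)
      // and the client polls "is procedure done" until it completes.
      admin.createTable(td);
    }
  }
}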
2024-12-09T15:34:50,612 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-09T15:34:50,612 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758490612"}]},"ts":"1733758490612"}
2024-12-09T15:34:50,615 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta
2024-12-09T15:34:50,616 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cac38d334b41e8045bdda283dc300551, ASSIGN}]
2024-12-09T15:34:50,618 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cac38d334b41e8045bdda283dc300551, ASSIGN
2024-12-09T15:34:50,620 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cac38d334b41e8045bdda283dc300551, ASSIGN; state=OFFLINE, location=3651d8ead0f3,40997,1733758488770; forceNewPlan=false, retain=false
2024-12-09T15:34:50,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:50,771 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cac38d334b41e8045bdda283dc300551, regionState=OPENING, regionLocation=3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:50,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cac38d334b41e8045bdda283dc300551, ASSIGN because future has completed
2024-12-09T15:34:50,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cac38d334b41e8045bdda283dc300551, server=3651d8ead0f3,40997,1733758488770}]
2024-12-09T15:34:50,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:50,931 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:34:50,932 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cac38d334b41e8045bdda283dc300551, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.', STARTKEY => '', ENDKEY => ''}
2024-12-09T15:34:50,932 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,932 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:34:50,932 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,932 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,933 INFO [StoreOpener-cac38d334b41e8045bdda283dc300551-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,935 INFO [StoreOpener-cac38d334b41e8045bdda283dc300551-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cac38d334b41e8045bdda283dc300551 columnFamilyName info
2024-12-09T15:34:50,935 DEBUG [StoreOpener-cac38d334b41e8045bdda283dc300551-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:34:50,936 INFO [StoreOpener-cac38d334b41e8045bdda283dc300551-1 {}] regionserver.HStore(327): Store=cac38d334b41e8045bdda283dc300551/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:34:50,936 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,937 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,937 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,937 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,937 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,939 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,943 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T15:34:50,943 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cac38d334b41e8045bdda283dc300551; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879028, jitterRate=0.11774294078350067}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-09T15:34:50,943 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cac38d334b41e8045bdda283dc300551
2024-12-09T15:34:50,944 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cac38d334b41e8045bdda283dc300551: Running coprocessor pre-open hook at 1733758490932Writing region info on filesystem at 1733758490932Initializing all the Stores at 1733758490933 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758490933Cleaning up temporary data from old regions at 1733758490937 (+4 ms)Running coprocessor post-open hooks at 1733758490943 (+6 ms)Region opened successfully at 1733758490944 (+1 ms)
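[Editor's note] Once the region open journal reports "Region opened successfully", the result is observable through the public client API. A small sketch of the polling a caller might do, assuming the same table name; the wrapper class and sleep interval are illustrative, while the Admin and RegionLocator calls are standard HBase client API:

// Sketch: wait until CreateTableProcedure (pid=4 above) has assigned all
// regions, then print where each region landed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WaitForTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      while (!admin.isTableAvailable(tn)) {  // mirrors the client's isProcedureDone polling
        Thread.sleep(100);
      }
      try (RegionLocator locator = conn.getRegionLocator(tn)) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          // For this test: cac38d334b41e8045bdda283dc300551 on 3651d8ead0f3,40997,...
          System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
        }
      }
    }
  }
}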
2024-12-09T15:34:50,945 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551., pid=6, masterSystemTime=1733758490927
2024-12-09T15:34:50,947 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:34:50,948 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:34:50,948 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cac38d334b41e8045bdda283dc300551, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,40997,1733758488770
2024-12-09T15:34:50,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cac38d334b41e8045bdda283dc300551, server=3651d8ead0f3,40997,1733758488770 because future has completed
2024-12-09T15:34:50,954 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-12-09T15:34:50,955 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cac38d334b41e8045bdda283dc300551, server=3651d8ead0f3,40997,1733758488770 in 178 msec
2024-12-09T15:34:50,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-12-09T15:34:50,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cac38d334b41e8045bdda283dc300551, ASSIGN in 339 msec
2024-12-09T15:34:50,959 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-09T15:34:50,959 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758490959"}]},"ts":"1733758490959"}
2024-12-09T15:34:50,961 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta
2024-12-09T15:34:50,962 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-09T15:34:50,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 829 msec
2024-12-09T15:34:51,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:51,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:34:51,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T15:34:51,689 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-09T15:34:51,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:34:51,690 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-09T15:34:51,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:34:51,690 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
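[Editor's note] The once-per-second WARNs above (identical traces repeated from 15:34:51 through 15:34:55 for the same three WAL files; duplicates elided here) come from the WAL close path retrying lease recovery against a DFSClient that has already been shut down. The traces show RecoverLeaseFSUtils invoking DistributedFileSystem.isFileClosed reflectively, which is why each failure surfaces as an InvocationTargetException wrapping the real IOException. A condensed sketch of that retry shape, assuming a bounded attempt count in place of the utility's configurable timeouts; this is an illustration, not the class itself:

// Sketch: retry HDFS lease recovery once a second, the pattern the log shows.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoverySketch {
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, int maxAttempts)
      throws InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        if (dfs.recoverLease(wal)) {  // ask the NameNode to recover the lease
          return true;
        }
        if (dfs.isFileClosed(wal)) {  // cheaper poll between full attempts
          return true;
        }
      } catch (IOException e) {
        // A closed DFSClient throws "Filesystem closed" here; the utility in
        // the log WARNs and retries, producing the repeating pattern above.
      }
      Thread.sleep(1000);  // matches the roughly 1 s cadence of the WARNs
    }
    return false;
  }
}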
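[Editor's note] Every trace bottoms out in the same root cause: once a DFSClient is closed, its checkOpen() guard rejects all further calls with java.io.IOException: Filesystem closed. A minimal reproduction, assuming an HDFS filesystem; the URI mirrors the NameNode address in the log and the path is illustrative:

// Sketch: reproduce the "Filesystem closed" cause chain seen in the traces.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FilesystemClosedDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38871"), new Configuration());
    fs.close();  // simulate the shutdown that closed the WAL writer's client
    // Any subsequent call fails DFSClient.checkOpen() with
    // java.io.IOException: Filesystem closed
    fs.exists(new Path("/user/jenkins"));
  }
}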
2024-12-09T15:34:54,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 after 68086ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:34:54,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta after 68111ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
11 more 2024-12-09T15:34:55,417 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T15:34:55,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,442 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:34:55,450 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T15:34:55,450 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-09T15:34:55,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:55,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:56,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:56,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:56,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:56,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:57,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:57,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:57,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:57,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:58,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:58,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:58,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:58,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:59,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:59,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:59,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:34:59,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:00,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-09T15:35:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T15:35:00,183 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T15:35:00,183 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, for max=2147483647 with caching=100
2024-12-09T15:35:00,187 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:00,187 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:00,192 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551., hostname=3651d8ead0f3,40997,1733758488770, seqNum=2]
2024-12-09T15:35:00,201 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-09T15:35:00,215 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T15:35:00,216 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T15:35:00,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T15:35:00,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 15:35:00,010 warning above]
2024-12-09T15:35:00,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-12-09T15:35:00,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:00,383 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing cac38d334b41e8045bdda283dc300551 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T15:35:00,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/1c455a765a0c494783214feb73b7a6f6 is 1080, key is row0001/info:/1733758500194/Put/seqid=0
2024-12-09T15:35:00,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741837_1013 (size=6033)
2024-12-09T15:35:00,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741837_1013 (size=6033)
2024-12-09T15:35:00,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 15:35:00,010 warning above]
2024-12-09T15:35:00,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 15:35:00,010 warning above]
2024-12-09T15:35:00,834 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/1c455a765a0c494783214feb73b7a6f6
2024-12-09T15:35:00,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/1c455a765a0c494783214feb73b7a6f6 as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/1c455a765a0c494783214feb73b7a6f6
2024-12-09T15:35:00,852 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/1c455a765a0c494783214feb73b7a6f6, entries=1, sequenceid=5, filesize=5.9 K
2024-12-09T15:35:00,854 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cac38d334b41e8045bdda283dc300551 in 472ms, sequenceid=5, compaction requested=false
2024-12-09T15:35:00,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for cac38d334b41e8045bdda283dc300551:
2024-12-09T15:35:00,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:00,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-12-09T15:35:00,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-12-09T15:35:00,873 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-12-09T15:35:00,873 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 646 msec
2024-12-09T15:35:00,878 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 672 msec
2024-12-09T15:35:01,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    [stack trace identical to the 15:35:00,010 warning above]
11 more 2024-12-09T15:35:01,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:01,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:01,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:02,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:02,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:02,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:02,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:03,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:03,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:03,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:03,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:04,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:04,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:04,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:04,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:05,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:05,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:05,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:05,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:06,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:06,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:06,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:06,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:07,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:07,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:07,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:07,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:08,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:08,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:08,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:08,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:09,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:09,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:09,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:09,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:10,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:10,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:10,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-09T15:35:10,313 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T15:35:10,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:10,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:10,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-09T15:35:10,321 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T15:35:10,323 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T15:35:10,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T15:35:10,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-12-09T15:35:10,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:10,479 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing cac38d334b41e8045bdda283dc300551 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T15:35:10,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/5fa12da74b7d43e2902c7c77d24196d0 is 1080, key is row0002/info:/1733758510315/Put/seqid=0
2024-12-09T15:35:10,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741838_1014 (size=6033)
2024-12-09T15:35:10,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741838_1014 (size=6033)
2024-12-09T15:35:10,527 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/5fa12da74b7d43e2902c7c77d24196d0
2024-12-09T15:35:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/5fa12da74b7d43e2902c7c77d24196d0 as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/5fa12da74b7d43e2902c7c77d24196d0
2024-12-09T15:35:10,549 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/5fa12da74b7d43e2902c7c77d24196d0, entries=1, sequenceid=9, filesize=5.9 K
2024-12-09T15:35:10,552 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cac38d334b41e8045bdda283dc300551 in 73ms, sequenceid=9, compaction requested=false
2024-12-09T15:35:10,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for cac38d334b41e8045bdda283dc300551:
2024-12-09T15:35:10,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:10,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-12-09T15:35:10,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-12-09T15:35:10,577 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-12-09T15:35:10,577 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 233 msec
2024-12-09T15:35:10,586 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 263 msec
2024-12-09T15:35:10,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:10,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:11,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:11,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:11,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:11,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:12,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
11 more 2024-12-09T15:35:12,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:12,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:12,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:13,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:13,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:13,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:13,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:14,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:14,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:14,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:14,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:15,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:15,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:15,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:15,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:16,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:16,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:16,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:16,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:17,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:17,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
... 11 more
2024-12-09T15:35:17,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[the identical WARN and stack repeat at 15:35:17,809 for WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 and at 15:35:18,023 for WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815]
2024-12-09T15:35:18,126 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
[the same WARN and stack then recur about once per second per file from 15:35:18,256 through 15:35:20,257, cycling over the four stale WALs of the shut-down cluster: MasterData/.../3651d8ead0f3%2C43929%2C1733758386761.1733758387210, WALs/.../3651d8ead0f3%2C34623%2C1733758386874.1733758387817, WALs/.../3651d8ead0f3%2C45133%2C1733758388518.1733758388815, and WALs/.../3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta]
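For context on the two reflective failures above: RecoverLeaseFSUtils invokes isFileClosed through java.lang.reflect.Method so it can run against FileSystem implementations that may not expose that method, which is why the underlying "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException. A minimal sketch of that probe pattern, assuming a generic FileSystem handle (the helper class and its fallback behavior are illustrative, not the actual RecoverLeaseFSUtils code):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative helper: probe isFileClosed(Path) reflectively, since only
    // DistributedFileSystem actually implements it.
    final class IsFileClosedProbe {
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // this FileSystem has no lease state to consult
        } catch (InvocationTargetException e) {
          // invoke() wraps whatever the target threw, so the
          // "java.io.IOException: Filesystem closed" from the already
          // shut-down DFSClient arrives here as e.getCause().
          return false;
        }
      }
    }

The same reflection-for-compatibility idea explains the adjacent DEBUG line: per its own message, HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer looks up a private threadGroup field that newer Hadoop versions no longer have, and NoSuchFieldException is the expected, tolerated outcome (HBASE-27595).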
2024-12-09T15:35:20,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-09T15:35:20,364 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T15:35:20,368 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C40997%2C1733758488770.1733758520368
2024-12-09T15:35:20,374 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:20,375 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:20,375 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:20,375 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:20,375 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:20,375 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758489308 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758520368
2024-12-09T15:35:20,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741833_1009 (size=5546)
2024-12-09T15:35:20,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741833_1009 (size=5546)
2024-12-09T15:35:20,391 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38083:38083),(127.0.0.1/127.0.0.1:35795:35795)]
2024-12-09T15:35:20,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:20,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:20,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-09T15:35:20,395 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T15:35:20,396 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
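The pid=11 FlushTableProcedure stored above is the master-side half of a client flush request (pid=9 just before it was the previous such flush completing). A hedged sketch of the client side that would drive this sequence; the table name comes from the log, while the connection setup is generic boilerplate:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Admin.flush submits a FlushTableProcedure on the master and waits
          // for it; the "Checking to see if procedure is done pid=..." lines
          // in this log are that client-side completion polling.
          admin.flush(TableName.valueOf(
              "TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
      }
    }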
2024-12-09T15:35:20,397 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T15:35:20,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-09T15:35:20,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:20,551 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing cac38d334b41e8045bdda283dc300551 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T15:35:20,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/3b5aab35beba4f5ba70a3bc209df6845 is 1080, key is row0003/info:/1733758520365/Put/seqid=0
2024-12-09T15:35:20,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741840_1016 (size=6033)
2024-12-09T15:35:20,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741840_1016 (size=6033)
2024-12-09T15:35:20,565 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/3b5aab35beba4f5ba70a3bc209df6845
2024-12-09T15:35:20,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/3b5aab35beba4f5ba70a3bc209df6845 as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/3b5aab35beba4f5ba70a3bc209df6845
2024-12-09T15:35:20,583 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/3b5aab35beba4f5ba70a3bc209df6845, entries=1, sequenceid=13, filesize=5.9 K
2024-12-09T15:35:20,584 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cac38d334b41e8045bdda283dc300551 in 33ms, sequenceid=13, compaction requested=true
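The "Committing ... as ..." step above is the standard two-phase publish for a flushed store file: the new HFile is written under the region's .tmp directory, then moved into the column-family directory with a single rename, so readers never observe a half-written file. A schematic sketch of just that commit step, with hypothetical fs/regionDir arguments (the real HRegionFileSystem adds validation around this):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class CommitSketch {
      // Publish a flushed file: on HDFS the rename is a single NameNode
      // metadata operation, so the file appears in the store atomically.
      static Path commitStoreFile(FileSystem fs, Path regionDir,
          String family, String fileName) throws IOException {
        Path tmp = new Path(regionDir, ".tmp/" + family + "/" + fileName);
        Path dst = new Path(regionDir, family + "/" + fileName);
        if (!fs.rename(tmp, dst)) {
          throw new IOException("Failed to commit " + tmp + " as " + dst);
        }
        return dst;
      }
    }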
2024-12-09T15:35:20,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for cac38d334b41e8045bdda283dc300551:
2024-12-09T15:35:20,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:20,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-09T15:35:20,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-09T15:35:20,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-09T15:35:20,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 189 msec
2024-12-09T15:35:20,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec
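The warnings that resume below come from a recover-then-poll loop: recoverLease() asks the NameNode to revoke the dead writer's lease on the old WAL, then the caller polls isFileClosed() until the file's last block is finalized. Since the earlier mini-cluster's DFSClient is already closed, every poll throws "Filesystem closed"; the utility catches it per attempt and retries, which is exactly the repeating WARN pattern here. A simplified sketch of that loop (not the actual RecoverLeaseFSUtils code, which adds timeouts, per-attempt exception handling, and the reflective call shown earlier):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class LeaseRecoverySketch {
      static void recoverLease(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        // Ask the NameNode to take the lease from the previous writer.
        boolean recovered = dfs.recoverLease(wal);
        while (!recovered && !dfs.isFileClosed(wal)) {
          Thread.sleep(1000); // matches the ~1 s retry cadence in this log
          recovered = dfs.recoverLease(wal);
        }
      }
    }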
[the Close-WAL-Writer-0 lease-recovery WARNs resume at 15:35:20,762 and recur about once per second per file through 15:35:24,027, cycling over the same four stale WALs with the identical InvocationTargetException / "Filesystem closed" stack]
2024-12-09T15:35:24,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:24,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:24,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:25,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:25,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:25,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:25,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:26,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:26,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:26,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:26,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:27,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:27,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:27,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:27,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:28,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:28,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:28,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:28,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:29,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:29,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:29,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:29,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:30,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-12-09T15:35:30,237 INFO [master/3651d8ead0f3:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-09T15:35:30,237 INFO [master/3651d8ead0f3:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-09T15:35:30,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null
2024-12-09T15:35:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-09T15:35:30,443 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T15:35:30,444 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T15:35:30,445 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T15:35:30,446 DEBUG [Time-limited test {}] regionserver.HStore(1541): cac38d334b41e8045bdda283dc300551/info is initiating minor compaction (all files)
2024-12-09T15:35:30,446 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-09T15:35:30,446 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T15:35:30,446 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of cac38d334b41e8045bdda283dc300551/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:30,446 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/1c455a765a0c494783214feb73b7a6f6, hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/5fa12da74b7d43e2902c7c77d24196d0, hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/3b5aab35beba4f5ba70a3bc209df6845] into tmpdir=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp, totalSize=17.7 K
2024-12-09T15:35:30,447 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1c455a765a0c494783214feb73b7a6f6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733758500194
2024-12-09T15:35:30,448 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5fa12da74b7d43e2902c7c77d24196d0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733758510315
2024-12-09T15:35:30,448 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3b5aab35beba4f5ba70a3bc209df6845, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733758520365
2024-12-09T15:35:30,462 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): cac38d334b41e8045bdda283dc300551#info#compaction#47 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T15:35:30,462 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/9a448743c7f94c6e8e826bd05cb965fc is 1080, key is row0001/info:/1733758500194/Put/seqid=0
2024-12-09T15:35:30,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741841_1017 (size=8296)
2024-12-09T15:35:30,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741841_1017 (size=8296)
2024-12-09T15:35:30,481 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/9a448743c7f94c6e8e826bd05cb965fc as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/9a448743c7f94c6e8e826bd05cb965fc
2024-12-09T15:35:30,491 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in cac38d334b41e8045bdda283dc300551/info of cac38d334b41e8045bdda283dc300551 into 9a448743c7f94c6e8e826bd05cb965fc(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T15:35:30,491 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for cac38d334b41e8045bdda283dc300551:
2024-12-09T15:35:30,495 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C40997%2C1733758488770.1733758530495
2024-12-09T15:35:30,508 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:30,508 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:30,508 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:30,509 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:30,509 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:30,509 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758520368 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758530495
2024-12-09T15:35:30,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741839_1015 (size=2520)
2024-12-09T15:35:30,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741839_1015 (size=2520)
2024-12-09T15:35:30,519 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758489308 to hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/oldWALs/3651d8ead0f3%2C40997%2C1733758488770.1733758489308
2024-12-09T15:35:30,523 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38083:38083),(127.0.0.1/127.0.0.1:35795:35795)]
2024-12-09T15:35:30,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:30,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:30,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-09T15:35:30,528 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T15:35:30,529 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T15:35:30,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T15:35:30,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-09T15:35:30,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:30,684 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing cac38d334b41e8045bdda283dc300551 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T15:35:30,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/1f360fb7e4ef49f888dc2b5a9eb172b4 is 1080, key is row0000/info:/1733758530493/Put/seqid=0
2024-12-09T15:35:30,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741843_1019 (size=6033)
2024-12-09T15:35:30,701 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/1f360fb7e4ef49f888dc2b5a9eb172b4
2024-12-09T15:35:30,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741843_1019 (size=6033)
2024-12-09T15:35:30,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/1f360fb7e4ef49f888dc2b5a9eb172b4 as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/1f360fb7e4ef49f888dc2b5a9eb172b4
2024-12-09T15:35:30,717 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/1f360fb7e4ef49f888dc2b5a9eb172b4, entries=1, sequenceid=18, filesize=5.9 K
2024-12-09T15:35:30,718 INFO [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cac38d334b41e8045bdda283dc300551 in 34ms, sequenceid=18, compaction requested=false
2024-12-09T15:35:30,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for cac38d334b41e8045bdda283dc300551:
2024-12-09T15:35:30,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:30,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-12-09T15:35:30,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-12-09T15:35:30,725 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-12-09T15:35:30,725 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 192 msec
2024-12-09T15:35:30,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 202 msec
2024-12-09T15:35:30,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null
11 more 2024-12-09T15:35:30,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:31,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:31,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:31,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:31,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:32,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:32,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:32,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:32,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:33,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:33,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:33,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:33,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:34,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:34,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-09T15:35:34,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:35:34,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:35:35,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:35:35,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:35:35,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:35:35,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:35:35,932 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cac38d334b41e8045bdda283dc300551, had cached 0 bytes from a total of 14329
2024-12-09T15:35:36,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:35:36,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:35:36,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:35:36,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:35:37,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:35:37,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:35:37,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:35:37,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:35:38,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:35:38,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:35:38,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:35:38,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:35:39,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:35:39,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:35:39,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:35:39,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:35:40,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:35:40,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:35:40,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-09T15:35:40,544 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T15:35:40,548 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C40997%2C1733758488770.1733758540548
2024-12-09T15:35:40,613 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:40,614 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:40,614 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:40,614 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:40,616 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:40,617 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758530495 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758540548
2024-12-09T15:35:40,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741842_1018 (size=2026)
2024-12-09T15:35:40,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741842_1018 (size=2026)
2024-12-09T15:35:40,630 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38083:38083),(127.0.0.1/127.0.0.1:35795:35795)]
2024-12-09T15:35:40,630 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758530495 is not closed yet, will try archiving it next time
2024-12-09T15:35:40,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T15:35:40,631 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T15:35:40,631 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
	at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
	at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:35:40,631 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:35:40,631 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/WALs/3651d8ead0f3,40997,1733758488770/3651d8ead0f3%2C40997%2C1733758488770.1733758520368 to hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/oldWALs/3651d8ead0f3%2C40997%2C1733758488770.1733758520368
2024-12-09T15:35:40,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:35:40,632 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T15:35:40,632 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T15:35:40,632 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=5139000, stopped=false
2024-12-09T15:35:40,632 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3651d8ead0f3,43765,1733758488561
2024-12-09T15:35:40,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:35:40,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:35:40,641 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:35:40,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:35:40,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:35:40,648 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T15:35:40,648 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
	at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
	at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:35:40,648 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:35:40,648 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3651d8ead0f3,40997,1733758488770' *****
2024-12-09T15:35:40,649 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T15:35:40,652 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:35:40,652 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:35:40,652 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(878): Closing user regions
2024-12-09T15:35:40,652 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(3091): Received CLOSE for cac38d334b41e8045bdda283dc300551
2024-12-09T15:35:40,654 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cac38d334b41e8045bdda283dc300551, disabling compactions & flushes
2024-12-09T15:35:40,654 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:40,654 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:40,654 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551. after waiting 0 ms
2024-12-09T15:35:40,654 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:40,654 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing cac38d334b41e8045bdda283dc300551 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T15:35:40,655 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T15:35:40,656 INFO [RS:0;3651d8ead0f3:40997 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T15:35:40,656 INFO [RS:0;3651d8ead0f3:40997 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T15:35:40,656 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(959): stopping server 3651d8ead0f3,40997,1733758488770
2024-12-09T15:35:40,656 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:35:40,656 INFO [RS:0;3651d8ead0f3:40997 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3651d8ead0f3:40997.
2024-12-09T15:35:40,656 DEBUG [RS:0;3651d8ead0f3:40997 {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
	at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:35:40,656 DEBUG [RS:0;3651d8ead0f3:40997 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:35:40,656 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T15:35:40,656 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T15:35:40,659 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T15:35:40,659 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T15:35:40,659 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T15:35:40,659 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-12-09T15:35:40,659 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1325): Online Regions={cac38d334b41e8045bdda283dc300551=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551., 1588230740=hbase:meta,,1.1588230740}
2024-12-09T15:35:40,659 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cac38d334b41e8045bdda283dc300551
2024-12-09T15:35:40,660 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T15:35:40,660 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T15:35:40,660 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T15:35:40,660 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T15:35:40,660 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T15:35:40,660 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB
2024-12-09T15:35:40,684 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/25038fc301ac4146b487a354abe599f8 is 1080, key is row0001/info:/1733758540545/Put/seqid=0
2024-12-09T15:35:40,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741845_1021 (size=6033)
2024-12-09T15:35:40,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741845_1021 (size=6033)
2024-12-09T15:35:40,693 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/25038fc301ac4146b487a354abe599f8
2024-12-09T15:35:40,695 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/.tmp/info/8dc6021a213845d5aeb373d64899a90d is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551./info:regioninfo/1733758490948/Put/seqid=0
2024-12-09T15:35:40,702 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/.tmp/info/25038fc301ac4146b487a354abe599f8 as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/25038fc301ac4146b487a354abe599f8
2024-12-09T15:35:40,716 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/25038fc301ac4146b487a354abe599f8, entries=1, sequenceid=22, filesize=5.9 K
2024-12-09T15:35:40,734 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cac38d334b41e8045bdda283dc300551 in 80ms, sequenceid=22, compaction requested=true
2024-12-09T15:35:40,741 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/1c455a765a0c494783214feb73b7a6f6, hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/5fa12da74b7d43e2902c7c77d24196d0, hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/3b5aab35beba4f5ba70a3bc209df6845] to archive
2024-12-09T15:35:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741846_1022 (size=7308)
2024-12-09T15:35:40,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741846_1022 (size=7308)
2024-12-09T15:35:40,744 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-09T15:35:40,744 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/.tmp/info/8dc6021a213845d5aeb373d64899a90d
2024-12-09T15:35:40,747 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/1c455a765a0c494783214feb73b7a6f6 to hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/1c455a765a0c494783214feb73b7a6f6
2024-12-09T15:35:40,749 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/5fa12da74b7d43e2902c7c77d24196d0 to hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/5fa12da74b7d43e2902c7c77d24196d0
2024-12-09T15:35:40,751 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/3b5aab35beba4f5ba70a3bc209df6845 to hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/info/3b5aab35beba4f5ba70a3bc209df6845
2024-12-09T15:35:40,752 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3651d8ead0f3:43765 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
	at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
	at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
	at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	... 16 more
2024-12-09T15:35:40,752 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1c455a765a0c494783214feb73b7a6f6=6033, 5fa12da74b7d43e2902c7c77d24196d0=6033, 3b5aab35beba4f5ba70a3bc209df6845=6033]
2024-12-09T15:35:40,759 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cac38d334b41e8045bdda283dc300551/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1
2024-12-09T15:35:40,760 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:40,760 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cac38d334b41e8045bdda283dc300551:
Waiting for close lock at 1733758540654
Running coprocessor pre-close hooks at 1733758540654
Disabling compacts and flushes for region at 1733758540654
Disabling writes for close at 1733758540654
Obtaining lock to block concurrent updates at 1733758540654
Preparing flush snapshotting stores in cac38d334b41e8045bdda283dc300551 at 1733758540654
Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733758540655 (+1 ms)
Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551. at 1733758540666 (+11 ms)
Flushing cac38d334b41e8045bdda283dc300551/info: creating writer at 1733758540666
Flushing cac38d334b41e8045bdda283dc300551/info: appending metadata at 1733758540683 (+17 ms)
Flushing cac38d334b41e8045bdda283dc300551/info: closing flushed file at 1733758540683
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c6152e5: reopening flushed file at 1733758540701 (+18 ms)
Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cac38d334b41e8045bdda283dc300551 in 80ms, sequenceid=22, compaction requested=true at 1733758540735 (+34 ms)
Writing region close event to WAL at 1733758540755 (+20 ms)
Running coprocessor post-close hooks at 1733758540760 (+5 ms)
Closed at 1733758540760
2024-12-09T15:35:40,760 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733758490132.cac38d334b41e8045bdda283dc300551.
2024-12-09T15:35:40,773 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/.tmp/ns/885275388289499db00372d38349afb7 is 43, key is default/ns:d/1733758489968/Put/seqid=0
2024-12-09T15:35:40,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:35:40,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741847_1023 (size=5153)
2024-12-09T15:35:40,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741847_1023 (size=5153)
2024-12-09T15:35:40,791 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/.tmp/ns/885275388289499db00372d38349afb7
2024-12-09T15:35:40,821 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/.tmp/table/a00767d6c17e408a883fbaae3b272e2d is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733758490959/Put/seqid=0
2024-12-09T15:35:40,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:35:40,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741848_1024 (size=5508)
2024-12-09T15:35:40,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741848_1024 (size=5508)
2024-12-09T15:35:40,860 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:35:41,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:35:41,060 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:35:41,152 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T15:35:41,152 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T15:35:41,155 INFO [regionserver/3651d8ead0f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:35:41,243 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/.tmp/table/a00767d6c17e408a883fbaae3b272e2d
2024-12-09T15:35:41,252 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/.tmp/info/8dc6021a213845d5aeb373d64899a90d as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/info/8dc6021a213845d5aeb373d64899a90d
2024-12-09T15:35:41,259 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/info/8dc6021a213845d5aeb373d64899a90d, entries=10, sequenceid=11, filesize=7.1 K
2024-12-09T15:35:41,260 DEBUG [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:35:41,263 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/.tmp/ns/885275388289499db00372d38349afb7 as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/ns/885275388289499db00372d38349afb7
2024-12-09T15:35:41,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:35:41,271 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/ns/885275388289499db00372d38349afb7, entries=2, sequenceid=11, filesize=5.0 K
2024-12-09T15:35:41,272 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/.tmp/table/a00767d6c17e408a883fbaae3b272e2d as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/table/a00767d6c17e408a883fbaae3b272e2d
2024-12-09T15:35:41,279 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/table/a00767d6c17e408a883fbaae3b272e2d, entries=2, sequenceid=11, filesize=5.4 K
2024-12-09T15:35:41,280 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 620ms, sequenceid=11, compaction requested=false
2024-12-09T15:35:41,291 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-09T15:35:41,291 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:35:41,291 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T15:35:41,292 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740:
Waiting for close lock at 1733758540660
Running coprocessor pre-close hooks at 1733758540660
Disabling compacts and flushes for region at 1733758540660
Disabling writes for close at 1733758540660
Obtaining lock to block concurrent updates at 1733758540660
Preparing flush snapshotting stores in 1588230740 at 1733758540660
Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733758540661 (+1 ms)
Flushing stores of hbase:meta,,1.1588230740 at 1733758540668 (+7 ms)
Flushing 1588230740/info: creating writer at 1733758540668
Flushing 1588230740/info: appending metadata at 1733758540694 (+26 ms)
Flushing 1588230740/info: closing flushed file at 1733758540694
Flushing 1588230740/ns: creating writer at 1733758540752 (+58 ms)
Flushing 1588230740/ns: appending metadata at 1733758540772 (+20 ms)
Flushing 1588230740/ns: closing flushed file at 1733758540773 (+1 ms)
Flushing 1588230740/table: creating writer at 1733758540797 (+24 ms)
Flushing 1588230740/table: appending metadata at 1733758540820 (+23 ms)
Flushing 1588230740/table: closing flushed file at 1733758540820
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21b6aa8a: reopening flushed file at 1733758541251 (+431 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51ec9f1f: reopening flushed file at 1733758541259 (+8 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bb12355: reopening flushed file at 1733758541271 (+12 ms)
Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 620ms, sequenceid=11, compaction requested=false at 1733758541281 (+10 ms)
Writing region close event to WAL at 1733758541284 (+3 ms)
Running coprocessor post-close hooks at 1733758541291 (+7 ms)
Closed at 1733758541291
2024-12-09T15:35:41,292 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T15:35:41,460 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(976): stopping server 3651d8ead0f3,40997,1733758488770; all regions closed.
2024-12-09T15:35:41,461 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,461 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,461 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,461 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,462 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741834_1010 (size=3306)
2024-12-09T15:35:41,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741834_1010 (size=3306)
2024-12-09T15:35:41,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:35:41,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T15:35:41,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T15:35:41,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:35:41,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:35:41,874 DEBUG [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/oldWALs
2024-12-09T15:35:41,874 INFO [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C40997%2C1733758488770.meta:.meta(num 1733758489892)
2024-12-09T15:35:41,875 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,875 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,875 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,875 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,875 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:41,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741844_1020 (size=1252)
2024-12-09T15:35:41,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741844_1020 (size=1252)
2024-12-09T15:35:41,882 DEBUG [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/oldWALs
2024-12-09T15:35:41,882 INFO [RS:0;3651d8ead0f3:40997 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C40997%2C1733758488770:(num 1733758540548)
2024-12-09T15:35:41,882 DEBUG [RS:0;3651d8ead0f3:40997 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:35:41,882 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:35:41,882 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:35:41,882 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.ChoreService(370): Chore service for: regionserver/3651d8ead0f3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T15:35:41,882 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:35:41,882 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T15:35:41,882 INFO [RS:0;3651d8ead0f3:40997 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40997
2024-12-09T15:35:41,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T15:35:41,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3651d8ead0f3,40997,1733758488770
2024-12-09T15:35:41,885 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:35:41,888 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3651d8ead0f3,40997,1733758488770]
2024-12-09T15:35:41,890 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3651d8ead0f3,40997,1733758488770 already deleted, retry=false
2024-12-09T15:35:41,890 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3651d8ead0f3,40997,1733758488770 expired; onlineServers=0
2024-12-09T15:35:41,890 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3651d8ead0f3,43765,1733758488561' *****
2024-12-09T15:35:41,890 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T15:35:41,891 INFO [M:0;3651d8ead0f3:43765 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:35:41,891 INFO [M:0;3651d8ead0f3:43765 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:35:41,891 DEBUG [M:0;3651d8ead0f3:43765 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T15:35:41,891 DEBUG [M:0;3651d8ead0f3:43765 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T15:35:41,891 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T15:35:41,891 INFO [M:0;3651d8ead0f3:43765 {}] hbase.ChoreService(370): Chore service for: master/3651d8ead0f3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T15:35:41,891 INFO [M:0;3651d8ead0f3:43765 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:35:41,891 DEBUG [M:0;3651d8ead0f3:43765 {}] master.HMaster(1795): Stopping service threads
2024-12-09T15:35:41,891 INFO [M:0;3651d8ead0f3:43765 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T15:35:41,891 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758489082 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758489082,5,FailOnTimeoutGroup]
2024-12-09T15:35:41,891 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758489086 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758489086,5,FailOnTimeoutGroup]
2024-12-09T15:35:41,892 INFO [M:0;3651d8ead0f3:43765 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:35:41,892 INFO [M:0;3651d8ead0f3:43765 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T15:35:41,892 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T15:35:41,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T15:35:41,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:35:41,898 DEBUG [M:0;3651d8ead0f3:43765 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false
2024-12-09T15:35:41,899 DEBUG [M:0;3651d8ead0f3:43765 {}] master.ActiveMasterManager(353): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master
2024-12-09T15:35:41,900 INFO [M:0;3651d8ead0f3:43765 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/.lastflushedseqids
2024-12-09T15:35:41,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741849_1025 (size=130)
2024-12-09T15:35:41,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741849_1025 (size=130)
2024-12-09T15:35:41,948 INFO [M:0;3651d8ead0f3:43765 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T15:35:41,948 INFO [M:0;3651d8ead0f3:43765 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T15:35:41,948 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T15:35:41,948 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:35:41,948 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:35:41,948 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T15:35:41,948 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:35:41,949 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.58 KB heapSize=54.99 KB
2024-12-09T15:35:41,975 DEBUG [M:0;3651d8ead0f3:43765 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/206c180733e94e5b8c649ba0b08d965d is 82, key is hbase:meta,,1/info:regioninfo/1733758489938/Put/seqid=0
2024-12-09T15:35:41,989 INFO [RS:0;3651d8ead0f3:40997 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:35:41,989 INFO [RS:0;3651d8ead0f3:40997 {}] regionserver.HRegionServer(1031): Exiting; stopping=3651d8ead0f3,40997,1733758488770; zookeeper connection closed.
2024-12-09T15:35:41,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:35:41,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40997-0x1012bd29e610001, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:35:41,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741850_1026 (size=5672)
2024-12-09T15:35:41,990 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28490a6a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28490a6a
2024-12-09T15:35:41,991 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T15:35:41,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741850_1026 (size=5672)
2024-12-09T15:35:42,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:42,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:42,392 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/206c180733e94e5b8c649ba0b08d965d
2024-12-09T15:35:42,418 DEBUG [M:0;3651d8ead0f3:43765 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4bb5b1edab0f4b829c1f9c8581452fb9 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733758490964/Put/seqid=0
2024-12-09T15:35:42,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741851_1027 (size=7822)
2024-12-09T15:35:42,443 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.98 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4bb5b1edab0f4b829c1f9c8581452fb9
2024-12-09T15:35:42,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741851_1027 (size=7822)
2024-12-09T15:35:42,448 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4bb5b1edab0f4b829c1f9c8581452fb9
2024-12-09T15:35:42,470 DEBUG [M:0;3651d8ead0f3:43765 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c3b887a989f41a0baaedba65530a85a is 69, key is 3651d8ead0f3,40997,1733758488770/rs:state/1733758489135/Put/seqid=0
2024-12-09T15:35:42,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741852_1028 (size=5156)
2024-12-09T15:35:42,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741852_1028 (size=5156)
2024-12-09T15:35:42,486 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c3b887a989f41a0baaedba65530a85a
2024-12-09T15:35:42,512 DEBUG [M:0;3651d8ead0f3:43765 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8ae632611a42428c87cd0a0ff079f097 is 52, key is load_balancer_on/state:d/1733758490120/Put/seqid=0
2024-12-09T15:35:42,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741853_1029 (size=5056)
2024-12-09T15:35:42,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741853_1029 (size=5056)
2024-12-09T15:35:42,534 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8ae632611a42428c87cd0a0ff079f097
2024-12-09T15:35:42,540 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/206c180733e94e5b8c649ba0b08d965d as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/206c180733e94e5b8c649ba0b08d965d
2024-12-09T15:35:42,545 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/206c180733e94e5b8c649ba0b08d965d, entries=8, sequenceid=121, filesize=5.5 K
2024-12-09T15:35:42,546 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4bb5b1edab0f4b829c1f9c8581452fb9 as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4bb5b1edab0f4b829c1f9c8581452fb9
2024-12-09T15:35:42,551 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4bb5b1edab0f4b829c1f9c8581452fb9
2024-12-09T15:35:42,551 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4bb5b1edab0f4b829c1f9c8581452fb9, entries=14, sequenceid=121, filesize=7.6 K
2024-12-09T15:35:42,552 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c3b887a989f41a0baaedba65530a85a as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c3b887a989f41a0baaedba65530a85a
2024-12-09T15:35:42,558 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c3b887a989f41a0baaedba65530a85a, entries=1, sequenceid=121, filesize=5.0 K
2024-12-09T15:35:42,559 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8ae632611a42428c87cd0a0ff079f097 as hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8ae632611a42428c87cd0a0ff079f097
2024-12-09T15:35:42,565 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32865/user/jenkins/test-data/4e19471b-9f4f-2b5d-d4b9-c80c9693e4be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8ae632611a42428c87cd0a0ff079f097, entries=1, sequenceid=121, filesize=4.9 K
2024-12-09T15:35:42,567 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 619ms, sequenceid=121, compaction requested=false
2024-12-09T15:35:42,569 INFO [M:0;3651d8ead0f3:43765 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:35:42,569 DEBUG [M:0;3651d8ead0f3:43765 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758541948Disabling compacts and flushes for region at 1733758541948Disabling writes for close at 1733758541948Obtaining lock to block concurrent updates at 1733758541949 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733758541949Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44629, getHeapSize=56248, getOffHeapSize=0, getCellsCount=140 at 1733758541949Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733758541950 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733758541950Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733758541974 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733758541974Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733758542399 (+425 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733758542417 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733758542417Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733758542448 (+31 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733758542469 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733758542469Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733758542492 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733758542512 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733758542512Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e968975: reopening flushed file at 1733758542539 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39bd1a08: reopening flushed file at 1733758542546 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cce8c: reopening flushed file at 1733758542551 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@395f79fe: reopening flushed file at 1733758542558 (+7 ms)Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 619ms, sequenceid=121, compaction requested=false at 1733758542567 (+9 ms)Writing region close event to WAL at 1733758542569 (+2 ms)Closed at 1733758542569
2024-12-09T15:35:42,570 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:42,570 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:42,570 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:42,570 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:42,570 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:35:42,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41417 is added to blk_1073741830_1006 (size=53026)
2024-12-09T15:35:42,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33877 is added to blk_1073741830_1006 (size=53026)
2024-12-09T15:35:42,574 INFO [M:0;3651d8ead0f3:43765 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T15:35:42,574 INFO [M:0;3651d8ead0f3:43765 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43765
2024-12-09T15:35:42,574 INFO [M:0;3651d8ead0f3:43765 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:35:42,574 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
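Note: the recurring "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above come from a Close-WAL-Writer thread, apparently left over from an earlier test's cluster (port 38871), that is still trying to recover a WAL lease after its DFS client has been shut down. As the traces show, RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed reflectively, and reflection wraps the checked IOException thrown by DFSClient.checkOpen in an InvocationTargetException, which is exactly the shape logged here. A self-contained sketch of that wrapping behaviour, using a stand-in class rather than a real HDFS client (all names below are illustrative, not HBase code):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveProbeSketch {
        // Stand-in for a DistributedFileSystem whose DFSClient was closed.
        public static class ClosedFs {
            public boolean isFileClosed(String path) throws IOException {
                // Mirrors DFSClient.checkOpen() failing after close().
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            Method probe = ClosedFs.class.getMethod("isFileClosed", String.class);
            try {
                probe.invoke(new ClosedFs(), "/some/wal");
            } catch (InvocationTargetException e) {
                // Reflection wraps the checked IOException, so the log shows
                // "InvocationTargetException: null ... Caused by: IOException".
                System.out.println("wrapped cause: " + e.getCause());
            }
        }
    }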
2024-12-09T15:35:42,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:35:42,677 INFO [M:0;3651d8ead0f3:43765 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:35:42,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43765-0x1012bd29e610000, quorum=127.0.0.1:52538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:35:42,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@bff2115{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:35:42,683 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a2777cc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:35:42,683 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:35:42,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f8e2aa3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:35:42,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8ac3c66{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.log.dir/,STOPPED}
2024-12-09T15:35:42,685 WARN [BP-1631766011-172.17.0.3-1733758487085 heartbeating to localhost/127.0.0.1:32865 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:35:42,685 WARN [BP-1631766011-172.17.0.3-1733758487085 heartbeating to localhost/127.0.0.1:32865 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1631766011-172.17.0.3-1733758487085 (Datanode Uuid e982f75d-d719-4dd0-b5ae-d4b40cd692ca) service to localhost/127.0.0.1:32865
2024-12-09T15:35:42,685 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:35:42,685 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:35:42,686 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/data/data3/current/BP-1631766011-172.17.0.3-1733758487085 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:35:42,686 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/data/data4/current/BP-1631766011-172.17.0.3-1733758487085 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:35:42,687 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:35:42,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77f859f0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:35:42,694 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2037cad5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:35:42,694 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:35:42,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@423ae426{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:35:42,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e9b78ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.log.dir/,STOPPED}
2024-12-09T15:35:42,697 WARN [BP-1631766011-172.17.0.3-1733758487085 heartbeating to localhost/127.0.0.1:32865 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:35:42,697 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:35:42,697 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:35:42,697 WARN [BP-1631766011-172.17.0.3-1733758487085 heartbeating to localhost/127.0.0.1:32865 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1631766011-172.17.0.3-1733758487085 (Datanode Uuid 64283df8-e8bc-4cd1-892f-220e1e4cfe81) service to localhost/127.0.0.1:32865
2024-12-09T15:35:42,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/data/data1/current/BP-1631766011-172.17.0.3-1733758487085 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:35:42,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/cluster_8da5223c-5ae6-0143-5313-b3a94203bc9f/data/data2/current/BP-1631766011-172.17.0.3-1733758487085 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:35:42,698 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:35:42,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1367dc96{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T15:35:42,714 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5206174e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:35:42,714 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:35:42,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ac52070{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:35:42,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1592e8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.log.dir/,STOPPED}
2024-12-09T15:35:42,728 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T15:35:42,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T15:35:42,774 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 181)

Potentially hanging thread: nioEventLoopGroup-34-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32865
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-35-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-36-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-34-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-36-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32865
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-34-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-12-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32865
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32865 from jenkins.hfs.5
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32865 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:32865
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-35-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-37-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-37-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32865 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32865
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-12-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-37-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-36-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-35-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:32865
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-12-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=801 (was 970), ProcessCount=11 (was 11), AvailableMemoryMB=4776 (was 5944)
2024-12-09T15:35:42,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:42,787 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=801, ProcessCount=11, AvailableMemoryMB=4776
2024-12-09T15:35:42,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T15:35:42,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.log.dir so I do NOT create it in target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf
2024-12-09T15:35:42,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c8e946d-8fc2-7857-6ee5-32169dfc0c8b/hadoop.tmp.dir so I do NOT create it in target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf
2024-12-09T15:35:42,788 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121, deleteOnExit=true
2024-12-09T15:35:42,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T15:35:42,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/test.cache.data in system properties and HBase conf
2024-12-09T15:35:42,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T15:35:42,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.log.dir in system properties and HBase conf
2024-12-09T15:35:42,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T15:35:42,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T15:35:42,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T15:35:42,789 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T15:35:42,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-09T15:35:42,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-09T15:35:42,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-09T15:35:42,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T15:35:42,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-09T15:35:42,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-09T15:35:42,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T15:35:42,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T15:35:42,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-09T15:35:42,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/nfs.dump.dir in system properties and HBase conf
2024-12-09T15:35:42,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/java.io.tmpdir in system properties and HBase conf
2024-12-09T15:35:42,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T15:35:42,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-09T15:35:42,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-09T15:35:42,812 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T15:35:42,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:42,896 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:35:42,900 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:35:42,914 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:35:42,914 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:35:42,915 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T15:35:42,919 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:35:42,922 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41c3a275{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:35:42,922 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1520cb76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:35:43,042 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:43,088 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@284a8a01{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/java.io.tmpdir/jetty-localhost-38199-hadoop-hdfs-3_4_1-tests_jar-_-any-18356878356005747079/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T15:35:43,090 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26d44036{HTTP/1.1, (http/1.1)}{localhost:38199}
2024-12-09T15:35:43,090 INFO  [Time-limited test {}] server.Server(415): Started @268742ms
2024-12-09T15:35:43,114 WARN  [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T15:35:43,271 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:43,370 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:35:43,378 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:35:43,382 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:35:43,382 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:35:43,383 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T15:35:43,383 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c08109c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:35:43,383 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1724ca70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:35:43,513 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9530f47{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/java.io.tmpdir/jetty-localhost-36413-hadoop-hdfs-3_4_1-tests_jar-_-any-15631034009859337520/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:35:43,513 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59fd7f75{HTTP/1.1, (http/1.1)}{localhost:36413}
2024-12-09T15:35:43,514 INFO  [Time-limited test {}] server.Server(415): Started @269165ms
2024-12-09T15:35:43,515 WARN  [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T15:35:43,551 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T15:35:43,554 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T15:35:43,555 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T15:35:43,556 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T15:35:43,556 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T15:35:43,556 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@208716aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.log.dir/,AVAILABLE}
2024-12-09T15:35:43,556 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70aed17c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T15:35:43,635 WARN  [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/data/data1/current/BP-185995437-172.17.0.3-1733758542821/current, will proceed with Du for space computation calculation,
2024-12-09T15:35:43,635 WARN  [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/data/data2/current/BP-185995437-172.17.0.3-1733758542821/current, will proceed with Du for space computation calculation,
2024-12-09T15:35:43,661 WARN  [Thread-1951 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T15:35:43,664 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb8645a3d25d6231 with lease ID 0xb92eb8c456a60317: Processing first storage report for DS-0eebd4b6-71e7-48db-a192-f18861dcbaaf from datanode DatanodeRegistration(127.0.0.1:45975, datanodeUuid=4ae49c9f-0a7c-499a-a990-93fd117cd1ce, infoPort=40877, infoSecurePort=0, ipcPort=40295, storageInfo=lv=-57;cid=testClusterID;nsid=1780921090;c=1733758542821)
2024-12-09T15:35:43,664 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb8645a3d25d6231 with lease ID 0xb92eb8c456a60317: from storage DS-0eebd4b6-71e7-48db-a192-f18861dcbaaf node DatanodeRegistration(127.0.0.1:45975, datanodeUuid=4ae49c9f-0a7c-499a-a990-93fd117cd1ce, infoPort=40877, infoSecurePort=0, ipcPort=40295, storageInfo=lv=-57;cid=testClusterID;nsid=1780921090;c=1733758542821), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:35:43,665 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb8645a3d25d6231 with lease ID 0xb92eb8c456a60317: Processing first storage report for DS-1348a8b5-ec8b-4337-893d-6dee202fd07c from datanode DatanodeRegistration(127.0.0.1:45975, datanodeUuid=4ae49c9f-0a7c-499a-a990-93fd117cd1ce, infoPort=40877, infoSecurePort=0, ipcPort=40295, storageInfo=lv=-57;cid=testClusterID;nsid=1780921090;c=1733758542821)
2024-12-09T15:35:43,665 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb8645a3d25d6231 with lease ID 0xb92eb8c456a60317: from storage DS-1348a8b5-ec8b-4337-893d-6dee202fd07c node DatanodeRegistration(127.0.0.1:45975, datanodeUuid=4ae49c9f-0a7c-499a-a990-93fd117cd1ce, infoPort=40877, infoSecurePort=0, ipcPort=40295, storageInfo=lv=-57;cid=testClusterID;nsid=1780921090;c=1733758542821), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:35:43,721 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5eab25ce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/java.io.tmpdir/jetty-localhost-46819-hadoop-hdfs-3_4_1-tests_jar-_-any-1761656718113101761/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:35:43,721 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5773e0ea{HTTP/1.1, (http/1.1)}{localhost:46819}
2024-12-09T15:35:43,721 INFO  [Time-limited test {}] server.Server(415): Started @269372ms
2024-12-09T15:35:43,723 WARN  [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T15:35:43,780 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:43,824 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:43,873 WARN  [Thread-1998 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/data/data3/current/BP-185995437-172.17.0.3-1733758542821/current, will proceed with Du for space computation calculation,
2024-12-09T15:35:43,873 WARN  [Thread-1999 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/data/data4/current/BP-185995437-172.17.0.3-1733758542821/current, will proceed with Du for space computation calculation,
2024-12-09T15:35:43,891 WARN  [Thread-1987 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T15:35:43,893 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c85b5c2b3e9fea0 with lease ID 0xb92eb8c456a60318: Processing first storage report for DS-53659a81-b28f-42dc-9112-a791fea5d783 from datanode DatanodeRegistration(127.0.0.1:34479, datanodeUuid=de1e8045-99e5-4924-8537-610aeadc82c1, infoPort=33241, infoSecurePort=0, ipcPort=35105, storageInfo=lv=-57;cid=testClusterID;nsid=1780921090;c=1733758542821)
2024-12-09T15:35:43,893 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c85b5c2b3e9fea0 with lease ID 0xb92eb8c456a60318: from storage DS-53659a81-b28f-42dc-9112-a791fea5d783 node DatanodeRegistration(127.0.0.1:34479, datanodeUuid=de1e8045-99e5-4924-8537-610aeadc82c1, infoPort=33241, infoSecurePort=0, ipcPort=35105, storageInfo=lv=-57;cid=testClusterID;nsid=1780921090;c=1733758542821), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T15:35:43,893 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c85b5c2b3e9fea0 with lease ID 0xb92eb8c456a60318: Processing first storage report for DS-6ae3745d-f955-489d-bccf-398b8b5d0ed6 from datanode DatanodeRegistration(127.0.0.1:34479, datanodeUuid=de1e8045-99e5-4924-8537-610aeadc82c1, infoPort=33241, infoSecurePort=0, ipcPort=35105, storageInfo=lv=-57;cid=testClusterID;nsid=1780921090;c=1733758542821)
2024-12-09T15:35:43,894 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c85b5c2b3e9fea0 with lease ID 0xb92eb8c456a60318: from storage DS-6ae3745d-f955-489d-bccf-398b8b5d0ed6 node DatanodeRegistration(127.0.0.1:34479, datanodeUuid=de1e8045-99e5-4924-8537-610aeadc82c1, infoPort=33241, infoSecurePort=0, ipcPort=35105, storageInfo=lv=-57;cid=testClusterID;nsid=1780921090;c=1733758542821), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T15:35:43,919 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf
2024-12-09T15:35:43,923 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/zookeeper_0, clientPort=51267, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T15:35:43,924 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51267
2024-12-09T15:35:43,924 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:35:43,926 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:35:43,935 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741825_1001 (size=7)
2024-12-09T15:35:43,936 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741825_1001 (size=7)
2024-12-09T15:35:43,937 INFO  [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7 with version=8
2024-12-09T15:35:43,937 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/hbase-staging
2024-12-09T15:35:43,939 INFO  [Time-limited test {}] client.ConnectionUtils(128): master/3651d8ead0f3:0 server-side Connection retries=45
2024-12-09T15:35:43,939 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:35:43,939 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T15:35:43,939 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T15:35:43,939 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:35:43,939 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T15:35:43,939 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T15:35:43,939 INFO  [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T15:35:43,940 INFO  [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38049
2024-12-09T15:35:43,941 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38049 connecting to ZooKeeper ensemble=127.0.0.1:51267
2024-12-09T15:35:43,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:380490x0, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T15:35:43,950 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38049-0x1012bd378510000 connected
2024-12-09T15:35:43,968 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:35:43,969 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:35:43,972 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:35:43,972 INFO  [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7, hbase.cluster.distributed=false
2024-12-09T15:35:43,973 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T15:35:43,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38049
2024-12-09T15:35:43,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38049
2024-12-09T15:35:43,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38049
2024-12-09T15:35:43,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38049
2024-12-09T15:35:43,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38049
2024-12-09T15:35:43,995 INFO  [Time-limited test {}] client.ConnectionUtils(128): regionserver/3651d8ead0f3:0 server-side Connection retries=45
2024-12-09T15:35:43,995 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:35:43,995 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T15:35:43,995 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T15:35:43,995 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T15:35:43,995 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T15:35:43,995 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T15:35:43,995 INFO  [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T15:35:43,996 INFO  [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45989
2024-12-09T15:35:43,997 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45989 connecting to ZooKeeper ensemble=127.0.0.1:51267
2024-12-09T15:35:43,998 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:35:43,999 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:35:44,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459890x0, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T15:35:44,004 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459890x0, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:35:44,004 INFO  [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T15:35:44,006 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45989-0x1012bd378510001 connected
2024-12-09T15:35:44,009 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T15:35:44,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T15:35:44,011 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T15:35:44,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45989
2024-12-09T15:35:44,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45989
2024-12-09T15:35:44,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45989
2024-12-09T15:35:44,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45989
2024-12-09T15:35:44,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45989
2024-12-09T15:35:44,034 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3651d8ead0f3:38049
2024-12-09T15:35:44,034 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3651d8ead0f3,38049,1733758543939
2024-12-09T15:35:44,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:35:44,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:35:44,039 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3651d8ead0f3,38049,1733758543939
2024-12-09T15:35:44,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-09T15:35:44,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:35:44,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:35:44,042 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-09T15:35:44,043 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3651d8ead0f3,38049,1733758543939 from backup master directory
2024-12-09T15:35:44,043 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:44,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3651d8ead0f3,38049,1733758543939
2024-12-09T15:35:44,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:35:44,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T15:35:44,044 WARN  [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
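The "Failed invocation" WARNs that recur above all have the same shape: while closing WAL writers, RecoverLeaseFSUtils probes DFSClient#isFileClosed through reflection, and because the test's DFS client has already been shut down, the underlying java.io.IOException: Filesystem closed surfaces wrapped in an InvocationTargetException. A minimal sketch of such a reflective probe, with illustrative names and behavior, not the actual RecoverLeaseFSUtils code:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class IsFileClosedProbe {
      // Reflection lets the caller link against Hadoop versions that predate
      // DistributedFileSystem#isFileClosed; a missing method just means "unknown".
      static boolean isFileClosed(DistributedFileSystem dfs, Path path) {
        try {
          Method m = DistributedFileSystem.class.getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(dfs, path);
        } catch (NoSuchMethodException e) {
          return false; // old Hadoop: API absent, caller falls back to lease polling
        } catch (IllegalAccessException | InvocationTargetException e) {
          // A closed DFSClient throws IOException("Filesystem closed"), which
          // reflection re-wraps in InvocationTargetException -- the exact pair
          // of exceptions printed in the WARN stacks above.
          return false;
        }
      }
    }

Treating the wrapped failure as "not known to be closed" lets the close path log and retry instead of crashing, which is why the identical stack repeats once per WAL file being recovered.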
2024-12-09T15:35:44,044 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3651d8ead0f3,38049,1733758543939
2024-12-09T15:35:44,068 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/hbase.id] with ID: 62bcb63c-b041-460c-b9b9-654c2984c915
2024-12-09T15:35:44,068 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/.tmp/hbase.id
2024-12-09T15:35:44,088 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741826_1002 (size=42)
2024-12-09T15:35:44,090 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741826_1002 (size=42)
2024-12-09T15:35:44,093 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/.tmp/hbase.id]:[hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/hbase.id]
2024-12-09T15:35:44,110 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:35:44,110 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-09T15:35:44,112 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
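The three FSUtils lines above (create hbase.id with an ID, write it to .tmp first, then move it into place) are the classic write-then-rename pattern: an HDFS rename is atomic, so readers either see no hbase.id at all or a fully written one, never a partial file. A rough sketch of the pattern against the stock Hadoop FileSystem API; the class name and paths are illustrative:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class ClusterIdFile {
      static void writeClusterId(Configuration conf, Path rootDir) throws IOException {
        FileSystem fs = rootDir.getFileSystem(conf);
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path dst = new Path(rootDir, "hbase.id");
        // 1. Write the whole payload to a temporary location first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename into place; on HDFS the rename is atomic, so hbase.id
        //    never exists in a half-written state.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename " + tmp + " -> " + dst + " failed");
        }
      }
    }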
2024-12-09T15:35:44,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:35:44,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:35:44,151 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741827_1003 (size=196)
2024-12-09T15:35:44,154 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T15:35:44,154 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741827_1003 (size=196)
2024-12-09T15:35:44,155 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T15:35:44,158 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T15:35:44,247 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741828_1004 (size=1189)
2024-12-09T15:35:44,247 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741828_1004 (size=1189)
2024-12-09T15:35:44,256 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store
2024-12-09T15:35:44,274 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:44,307 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741829_1005 (size=34)
2024-12-09T15:35:44,307 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741829_1005 (size=34)
2024-12-09T15:35:44,309 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:35:44,309 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T15:35:44,309 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:35:44,309 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:35:44,309 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T15:35:44,309 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:35:44,309 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
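The 'master:store' descriptor echoed earlier (at MasterRegion(370) and again at HRegion(7590)) is four column families with per-family tuning, and every attribute in that dump maps onto the public HBase builder API. For orientation, a hedged reconstruction of the 'info' and 'proc' families from the logged values; this is illustrative, not the code the master itself runs:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MasterStoreSchema {
      static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build();
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }

The remaining 'rs' and 'state' families in the log carry the same settings as 'proc', which is why only two families are spelled out here.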
2024-12-09T15:35:44,309 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758544309Disabling compacts and flushes for region at 1733758544309Disabling writes for close at 1733758544309Writing region close event to WAL at 1733758544309Closed at 1733758544309
2024-12-09T15:35:44,312 WARN  [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/.initializing
2024-12-09T15:35:44,312 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/WALs/3651d8ead0f3,38049,1733758543939
2024-12-09T15:35:44,333 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C38049%2C1733758543939, suffix=, logDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/WALs/3651d8ead0f3,38049,1733758543939, archiveDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/oldWALs, maxLogs=10
2024-12-09T15:35:44,333 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C38049%2C1733758543939.1733758544333
2024-12-09T15:35:44,363 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/WALs/3651d8ead0f3,38049,1733758543939/3651d8ead0f3%2C38049%2C1733758543939.1733758544333
2024-12-09T15:35:44,369 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33241:33241),(127.0.0.1/127.0.0.1:40877:40877)]
2024-12-09T15:35:44,378 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-09T15:35:44,379 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:35:44,379 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,379 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,387 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,392 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-09T15:35:44,393 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:35:44,394 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:35:44,394 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,396 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-09T15:35:44,396 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:35:44,396 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:35:44,397 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,398 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-09T15:35:44,398 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:35:44,399 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:35:44,399 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,400 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-09T15:35:44,401 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:35:44,403 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:35:44,406 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,410 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,410 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,412 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,412 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,412 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-09T15:35:44,413 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T15:35:44,419 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T15:35:44,419 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743740, jitterRate=-0.05428627133369446}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-09T15:35:44,420 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733758544379Initializing all the Stores at 1733758544380 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758544380Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758544387 (+7 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758544387Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758544387Cleaning up temporary data from old regions at 1733758544412 (+25 ms)Region opened successfully at 1733758544420 (+8 ms)
2024-12-09T15:35:44,421 INFO  [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-09T15:35:44,432 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56b80666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000,
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:35:44,433 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T15:35:44,433 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T15:35:44,433 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T15:35:44,433 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T15:35:44,436 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 2 msec 2024-12-09T15:35:44,436 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T15:35:44,436 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T15:35:44,439 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T15:35:44,440 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T15:35:44,445 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T15:35:44,445 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T15:35:44,446 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T15:35:44,447 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T15:35:44,448 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T15:35:44,453 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T15:35:44,455 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T15:35:44,456 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T15:35:44,459 DEBUG 
[master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T15:35:44,461 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T15:35:44,462 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T15:35:44,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:35:44,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:35:44,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:35:44,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:35:44,466 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3651d8ead0f3,38049,1733758543939, sessionid=0x1012bd378510000, setting cluster-up flag (Was=false) 2024-12-09T15:35:44,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:35:44,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:35:44,476 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T15:35:44,478 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,38049,1733758543939 2024-12-09T15:35:44,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:35:44,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:35:44,497 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T15:35:44,499 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,38049,1733758543939 2024-12-09T15:35:44,503 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T15:35:44,505 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T15:35:44,506 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T15:35:44,506 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T15:35:44,506 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3651d8ead0f3,38049,1733758543939 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T15:35:44,508 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:35:44,509 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:35:44,509 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:35:44,509 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:35:44,509 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3651d8ead0f3:0, corePoolSize=10, maxPoolSize=10 2024-12-09T15:35:44,509 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,509 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:35:44,509 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T15:35:44,520 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733758574519 2024-12-09T15:35:44,520 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T15:35:44,520 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T15:35:44,520 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T15:35:44,520 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T15:35:44,520 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T15:35:44,520 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T15:35:44,520 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:35:44,520 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T15:35:44,521 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
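The cleaner initialization above (TimeToLiveLogCleaner, ReplicationLogCleaner, TimeToLiveProcedureWALCleaner, and the LogsCleaner chore with period=600000) is driven entirely by configuration. A minimal sketch of the relevant knobs, assuming hbase-common on the classpath; the key names below are the commonly documented ones and the values simply mirror what this log prints, so treat them as illustrative rather than authoritative for this 3.0.0-beta-2 build:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChoreConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Delegate cleaners run by the master's LogCleaner chore; these match
            // the classes the log shows being initialized.
            conf.set("hbase.master.logcleaner.plugins",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
                + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
            // How long an archived WAL sits in oldWALs before the TTL cleaner
            // may delete it (milliseconds).
            conf.setLong("hbase.master.logcleaner.ttl", 600000L);
            // Period of the cleaner chores themselves; the LogsCleaner chore above
            // runs with period=600000 ms.
            conf.setInt("hbase.master.cleaner.interval", 600000);
            System.out.println(conf.get("hbase.master.logcleaner.plugins"));
        }
    }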
2024-12-09T15:35:44,522 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T15:35:44,522 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:44,522 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T15:35:44,522 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T15:35:44,522 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T15:35:44,523 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T15:35:44,523 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T15:35:44,523 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758544523,5,FailOnTimeoutGroup] 2024-12-09T15:35:44,523 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758544523,5,FailOnTimeoutGroup] 2024-12-09T15:35:44,523 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,523 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T15:35:44,523 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,523 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:35:44,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:35:44,547 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(746): ClusterId : 62bcb63c-b041-460c-b9b9-654c2984c915 2024-12-09T15:35:44,547 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T15:35:44,550 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T15:35:44,550 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T15:35:44,551 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T15:35:44,551 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7 2024-12-09T15:35:44,552 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.RegionServerProcedureManagerHost(45): 
Procedure online-snapshot initialized 2024-12-09T15:35:44,552 DEBUG [RS:0;3651d8ead0f3:45989 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a80bc69, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:35:44,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:35:44,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:35:44,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:35:44,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:35:44,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:35:44,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:44,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:35:44,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:35:44,565 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:35:44,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:44,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:35:44,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:35:44,567 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:35:44,567 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:44,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:35:44,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:35:44,569 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:35:44,569 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:44,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:35:44,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:35:44,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740 2024-12-09T15:35:44,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740 2024-12-09T15:35:44,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T15:35:44,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:35:44,571 DEBUG [RS:0;3651d8ead0f3:45989 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3651d8ead0f3:45989 2024-12-09T15:35:44,572 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T15:35:44,572 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T15:35:44,572 DEBUG [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T15:35:44,572 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T15:35:44,572 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(2659): reportForDuty to master=3651d8ead0f3,38049,1733758543939 with port=45989, startcode=1733758543994 2024-12-09T15:35:44,573 DEBUG [RS:0;3651d8ead0f3:45989 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T15:35:44,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:35:44,575 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:35:44,575 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808593, jitterRate=0.028179839253425598}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:35:44,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733758544562Initializing all the Stores at 1733758544562Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758544562Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758544562Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758544562Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758544562Cleaning up temporary data from old regions at 1733758544571 (+9 ms)Region opened successfully at 1733758544576 (+5 ms) 2024-12-09T15:35:44,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T15:35:44,576 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T15:35:44,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T15:35:44,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T15:35:44,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T15:35:44,576 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34787, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T15:35:44,577 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T15:35:44,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758544576Disabling compacts and flushes for region at 1733758544576Disabling writes for close at 1733758544576Writing region close event to WAL at 1733758544577 (+1 ms)Closed at 1733758544577 2024-12-09T15:35:44,577 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38049 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3651d8ead0f3,45989,1733758543994 2024-12-09T15:35:44,577 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38049 {}] master.ServerManager(517): Registering regionserver=3651d8ead0f3,45989,1733758543994 2024-12-09T15:35:44,578 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:35:44,578 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T15:35:44,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T15:35:44,579 DEBUG [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7 
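The FlushLargeStoresPolicy messages earlier in this run ("No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table ... descriptor") show the fallback rule: without an explicit bound, the policy uses the memstore flush size divided by the number of column families (hence 16.0 M for the four-family meta region, 32.0 M for the four-family master store). A table can set the bound explicitly in its descriptor; a hedged sketch, where the table name "demo" and the 16 MB value are hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // "demo" and 16 MB are illustrative, not taken from this log.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(16L * 1024 * 1024))
                .build();
            System.out.println(
                td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }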
2024-12-09T15:35:44,579 DEBUG [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35067 2024-12-09T15:35:44,579 DEBUG [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T15:35:44,580 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T15:35:44,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T15:35:44,581 DEBUG [RS:0;3651d8ead0f3:45989 {}] zookeeper.ZKUtil(111): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3651d8ead0f3,45989,1733758543994 2024-12-09T15:35:44,581 WARN [RS:0;3651d8ead0f3:45989 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T15:35:44,581 INFO [RS:0;3651d8ead0f3:45989 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:35:44,581 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T15:35:44,581 DEBUG [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994 2024-12-09T15:35:44,582 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3651d8ead0f3,45989,1733758543994] 2024-12-09T15:35:44,585 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T15:35:44,586 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T15:35:44,588 INFO [RS:0;3651d8ead0f3:45989 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T15:35:44,589 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,589 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T15:35:44,590 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T15:35:44,590 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
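Two sizing decisions are visible just above: MemStoreFlusher reports globalMemStoreLimit=880 M with a low-water mark of 836 M (0.95 of the limit), and the PressureAwareCompactionThroughputController bounds compaction I/O between 50 and 100 MB/second. A sketch of the keys these values usually come from (names worth double-checking against the running build; 880 M at the default 0.4 heap fraction implies roughly a 2.2 G heap):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of the region server heap shared by all memstores.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            // Low-water mark as a fraction of the limit: 836 M / 880 M = 0.95.
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // Throughput window for PressureAwareCompactionThroughputController,
            // in bytes/second, matching the logged 50.00-100.00 MB/second bounds.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        }
    }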
2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:35:44,590 DEBUG [RS:0;3651d8ead0f3:45989 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:35:44,590 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,591 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,591 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,591 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
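The CompactionConfiguration banner repeated for every store opened in this startup (minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000, major period 604800000 with jitter 0.500000) is the standard compaction tuning echoed back at store-open time. A sketch of the corresponding settings, with values copied from the logged banner; the key names are the long-standing ones but should still be confirmed against 3.0.0-beta-2:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // selection ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            // Files at or below this size are always compaction candidates.
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
            // Major compaction every 7 days (604800000 ms), randomized by +/-50%.
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        }
    }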
2024-12-09T15:35:44,591 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,591 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,45989,1733758543994-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T15:35:44,611 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T15:35:44,611 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,45989,1733758543994-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,611 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,611 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.Replication(171): 3651d8ead0f3,45989,1733758543994 started 2024-12-09T15:35:44,626 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:44,627 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(1482): Serving as 3651d8ead0f3,45989,1733758543994, RpcServer on 3651d8ead0f3/172.17.0.3:45989, sessionid=0x1012bd378510001 2024-12-09T15:35:44,627 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T15:35:44,627 DEBUG [RS:0;3651d8ead0f3:45989 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3651d8ead0f3,45989,1733758543994 2024-12-09T15:35:44,627 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,45989,1733758543994' 2024-12-09T15:35:44,627 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T15:35:44,627 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T15:35:44,628 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T15:35:44,628 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T15:35:44,628 DEBUG [RS:0;3651d8ead0f3:45989 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3651d8ead0f3,45989,1733758543994 2024-12-09T15:35:44,628 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,45989,1733758543994' 2024-12-09T15:35:44,628 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T15:35:44,628 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T15:35:44,629 DEBUG [RS:0;3651d8ead0f3:45989 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T15:35:44,629 INFO [RS:0;3651d8ead0f3:45989 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T15:35:44,629 INFO [RS:0;3651d8ead0f3:45989 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager.
2024-12-09T15:35:44,732 WARN [3651d8ead0f3:38049 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-12-09T15:35:44,733 INFO [RS:0;3651d8ead0f3:45989 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C45989%2C1733758543994, suffix=, logDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994, archiveDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/oldWALs, maxLogs=32
2024-12-09T15:35:44,733 INFO [RS:0;3651d8ead0f3:45989 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C45989%2C1733758543994.1733758544733
2024-12-09T15:35:44,756 INFO [RS:0;3651d8ead0f3:45989 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994/3651d8ead0f3%2C45989%2C1733758543994.1733758544733
2024-12-09T15:35:44,771 DEBUG [RS:0;3651d8ead0f3:45989 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33241:33241),(127.0.0.1/127.0.0.1:40877:40877)]
2024-12-09T15:35:44,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:44,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:44,982 DEBUG [3651d8ead0f3:38049 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-09T15:35:44,983 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3651d8ead0f3,45989,1733758543994
2024-12-09T15:35:44,984 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,45989,1733758543994, state=OPENING
2024-12-09T15:35:44,986 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-09T15:35:44,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:35:44,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:35:44,990 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:35:44,990 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:35:44,990 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T15:35:44,990 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,45989,1733758543994}]
2024-12-09T15:35:45,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:45,145 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-09T15:35:45,151 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38001, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-09T15:35:45,156 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-09T15:35:45,156 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T15:35:45,163 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C45989%2C1733758543994.meta, suffix=.meta, logDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994, archiveDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/oldWALs, maxLogs=32
2024-12-09T15:35:45,164 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C45989%2C1733758543994.meta.1733758545163.meta
2024-12-09T15:35:45,179 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994/3651d8ead0f3%2C45989%2C1733758543994.meta.1733758545163.meta
2024-12-09T15:35:45,186 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33241:33241),(127.0.0.1/127.0.0.1:40877:40877)]
2024-12-09T15:35:45,191 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-09T15:35:45,192 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-09T15:35:45,192 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-09T15:35:45,192
INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T15:35:45,192 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T15:35:45,192 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:35:45,192 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T15:35:45,192 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T15:35:45,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:35:45,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:35:45,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:45,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:35:45,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:35:45,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:35:45,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:45,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:35:45,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T15:35:45,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T15:35:45,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:45,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:35:45,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T15:35:45,201 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T15:35:45,201 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:45,202 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:35:45,202 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T15:35:45,203 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740 2024-12-09T15:35:45,204 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740 2024-12-09T15:35:45,206 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T15:35:45,206 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T15:35:45,206 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T15:35:45,208 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T15:35:45,209 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810483, jitterRate=0.030582427978515625}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T15:35:45,209 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T15:35:45,210 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733758545192Writing region info on filesystem at 1733758545192Initializing all the Stores at 1733758545193 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758545193Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758545194 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758545194Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758545194Cleaning up temporary data from old regions at 1733758545206 (+12 ms)Running coprocessor post-open hooks at 1733758545209 (+3 ms)Region opened successfully at 1733758545210 (+1 ms) 2024-12-09T15:35:45,211 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733758545144 2024-12-09T15:35:45,214 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T15:35:45,214 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T15:35:45,215 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,45989,1733758543994 2024-12-09T15:35:45,216 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,45989,1733758543994, state=OPEN 2024-12-09T15:35:45,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:35:45,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T15:35:45,227 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3651d8ead0f3,45989,1733758543994 2024-12-09T15:35:45,227 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:35:45,227 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T15:35:45,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T15:35:45,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,45989,1733758543994 in 237 msec 2024-12-09T15:35:45,238 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T15:35:45,238 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 653 msec 2024-12-09T15:35:45,251 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:35:45,251 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T15:35:45,253 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T15:35:45,253 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,45989,1733758543994, seqNum=-1] 2024-12-09T15:35:45,253 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T15:35:45,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45113, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T15:35:45,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 759 msec 2024-12-09T15:35:45,266 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733758545266, completionTime=-1 2024-12-09T15:35:45,266 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T15:35:45,266 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T15:35:45,272 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T15:35:45,272 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733758605272 2024-12-09T15:35:45,272 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733758665272 2024-12-09T15:35:45,272 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 5 msec 2024-12-09T15:35:45,272 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,38049,1733758543939-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:45,272 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,38049,1733758543939-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:45,272 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,38049,1733758543939-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
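The CompactionConfiguration(183) entries earlier in this stretch print the effective per-family compaction tuning for hbase:meta: 128 MB minimum compact size, 3-10 files per compaction, ratio 1.2 (5.0 off-peak), a 2684354560-byte throttle point, and a 7-day major-compaction period with 0.5 jitter. As a sketch only, assuming the standard HBase configuration keys rather than whatever this test harness actually sets, the same values would be expressed like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionTuningSketch {
      // Sketch only: standard HBase keys matching the values printed by
      // CompactionConfiguration above; not this test's actual setup code.
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact: 3
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact: 10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period: 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.500000
        return conf;
      }
    }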
2024-12-09T15:35:45,273 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3651d8ead0f3:38049, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:45,273 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:45,274 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:45,276 DEBUG [master/3651d8ead0f3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T15:35:45,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:45,280 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.236sec 2024-12-09T15:35:45,280 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T15:35:45,280 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 
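The recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings throughout this stretch all come from Close-WAL-Writer threads of the earlier minicluster (the hdfs://localhost:38871 and 5460a68e-... test-data paths, as opposed to the current cluster's localhost:35067 and 2883ea0d-... paths): they keep retrying WAL lease recovery after that cluster's DFSClient was shut down, so DFSClient.checkOpen rejects the reflective isFileClosed probe. A minimal sketch of the probe pattern, assuming direct access to a DistributedFileSystem (the real RecoverLeaseFSUtils adds timeouts, pauses, and fallbacks):

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoveryProbeSketch {
      // Sketch only: ask the NameNode to recover the WAL's lease, then poll
      // isFileClosed reflectively (older Hadoop clients lack the method).
      public static boolean recover(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean recovered = dfs.recoverLease(wal);
        Method isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
        while (!recovered) {
          // Once the DFSClient has been closed, this call fails with an
          // InvocationTargetException wrapping IOException("Filesystem closed"),
          // which is exactly the warning logged above.
          recovered = (Boolean) isFileClosed.invoke(dfs, wal);
          Thread.sleep(1000L);
        }
        return recovered;
      }
    }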
2024-12-09T15:35:45,280 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T15:35:45,280 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T15:35:45,280 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T15:35:45,280 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,38049,1733758543939-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T15:35:45,280 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,38049,1733758543939-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T15:35:45,290 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T15:35:45,290 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T15:35:45,290 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,38049,1733758543939-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T15:35:45,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@324924bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T15:35:45,346 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3651d8ead0f3,38049,-1 for getting cluster id 2024-12-09T15:35:45,346 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T15:35:45,347 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '62bcb63c-b041-460c-b9b9-654c2984c915' 2024-12-09T15:35:45,348 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T15:35:45,348 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "62bcb63c-b041-460c-b9b9-654c2984c915" 2024-12-09T15:35:45,348 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27dd99ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T15:35:45,348 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3651d8ead0f3,38049,-1] 2024-12-09T15:35:45,348 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T15:35:45,349 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T15:35:45,350 
INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57470, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T15:35:45,351 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ce088a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T15:35:45,351 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T15:35:45,352 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,45989,1733758543994, seqNum=-1] 2024-12-09T15:35:45,352 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T15:35:45,354 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46776, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T15:35:45,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3651d8ead0f3,38049,1733758543939 2024-12-09T15:35:45,356 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:35:45,363 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T15:35:45,364 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T15:35:45,365 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 3651d8ead0f3,38049,1733758543939 2024-12-09T15:35:45,365 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@37746974 2024-12-09T15:35:45,366 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T15:35:45,370 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57476, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T15:35:45,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T15:35:45,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
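The two TableDescriptorChecker(321) warnings just above fire because the test deliberately shrinks the table's maximum file size to 786432 bytes and the memstore flush size to 8192 bytes, so that flushes, log rolls, and splits happen quickly enough to observe. A hedged sketch of a descriptor that would trip the same sanity checks (the actual test may build it differently):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class TinyTableSketch {
      // Sketch only: tiny size limits like the ones warned about above.
      public static void create(Admin admin) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setMaxFileSize(786432L)      // MAX_FILESIZE "too small" -> WARN
            .setMemStoreFlushSize(8192L)  // MEMSTORE_FLUSHSIZE "too small" -> WARN
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
        admin.createTable(td);
      }
    }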
2024-12-09T15:35:45,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T15:35:45,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-09T15:35:45,374 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T15:35:45,375 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:45,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-09T15:35:45,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T15:35:45,376 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T15:35:45,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741835_1011 (size=381) 2024-12-09T15:35:45,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741835_1011 (size=381) 2024-12-09T15:35:45,404 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e4cb41ff21765f272c4aaa91a5fdf38d, NAME => 'TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7 2024-12-09T15:35:45,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741836_1012 (size=64) 2024-12-09T15:35:45,437 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; 
hotProtect now disable 2024-12-09T15:35:45,437 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing e4cb41ff21765f272c4aaa91a5fdf38d, disabling compactions & flushes 2024-12-09T15:35:45,437 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. 2024-12-09T15:35:45,437 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. 2024-12-09T15:35:45,437 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. after waiting 0 ms 2024-12-09T15:35:45,437 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. 2024-12-09T15:35:45,437 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. 2024-12-09T15:35:45,437 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for e4cb41ff21765f272c4aaa91a5fdf38d: Waiting for close lock at 1733758545437Disabling compacts and flushes for region at 1733758545437Disabling writes for close at 1733758545437Writing region close event to WAL at 1733758545437Closed at 1733758545437 2024-12-09T15:35:45,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741836_1012 (size=64) 2024-12-09T15:35:45,441 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T15:35:45,442 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733758545441"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733758545441"}]},"ts":"1733758545441"} 2024-12-09T15:35:45,451 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
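The MetaTableAccessor(964) entry above is the JSON rendering of a single Put against hbase:meta: the row key is the full region name, and the info family gets two cells, the serialized RegionInfo (vlen=63) and the region state (vlen=6). A rough reconstruction of that shape, with both payloads stubbed out because the log records only their lengths:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaPutSketch {
      // Sketch only: the shape of the Put logged by MetaTableAccessor above.
      public static Put build() {
        byte[] row = Bytes.toBytes(
            "TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.");
        byte[] regionInfo = new byte[63]; // placeholder for the serialized RegionInfo (vlen=63)
        byte[] state = new byte[6];       // placeholder for the region-state value (vlen=6)
        return new Put(row)
            .addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), regionInfo)
            .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), state);
      }
    }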
2024-12-09T15:35:45,452 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T15:35:45,453 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758545452"}]},"ts":"1733758545452"} 2024-12-09T15:35:45,455 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-09T15:35:45,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d, ASSIGN}] 2024-12-09T15:35:45,457 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d, ASSIGN 2024-12-09T15:35:45,458 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d, ASSIGN; state=OFFLINE, location=3651d8ead0f3,45989,1733758543994; forceNewPlan=false, retain=false 2024-12-09T15:35:45,609 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e4cb41ff21765f272c4aaa91a5fdf38d, regionState=OPENING, regionLocation=3651d8ead0f3,45989,1733758543994 2024-12-09T15:35:45,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d, ASSIGN because future has completed 2024-12-09T15:35:45,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994}] 2024-12-09T15:35:45,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,771 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. 2024-12-09T15:35:45,772 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e4cb41ff21765f272c4aaa91a5fdf38d, NAME => 'TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:35:45,772 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,772 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:35:45,772 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,772 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,774 INFO [StoreOpener-e4cb41ff21765f272c4aaa91a5fdf38d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,776 INFO [StoreOpener-e4cb41ff21765f272c4aaa91a5fdf38d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4cb41ff21765f272c4aaa91a5fdf38d columnFamilyName info 2024-12-09T15:35:45,776 DEBUG [StoreOpener-e4cb41ff21765f272c4aaa91a5fdf38d-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:35:45,776 INFO [StoreOpener-e4cb41ff21765f272c4aaa91a5fdf38d-1 {}] regionserver.HStore(327): Store=e4cb41ff21765f272c4aaa91a5fdf38d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:35:45,776 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,777 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,779 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,779 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,779 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:45,782 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:45,798 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:35:45,799 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e4cb41ff21765f272c4aaa91a5fdf38d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693512, jitterRate=-0.11815455555915833}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T15:35:45,799 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:45,800 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e4cb41ff21765f272c4aaa91a5fdf38d: Running coprocessor pre-open hook at 1733758545772Writing region info on filesystem at 1733758545772Initializing all the Stores at 1733758545773 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758545773Cleaning up temporary data from old regions at 1733758545779 (+6 ms)Running coprocessor post-open hooks at 1733758545799 (+20 ms)Region opened successfully at 1733758545800 (+1 ms) 2024-12-09T15:35:45,802 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., pid=6, masterSystemTime=1733758545766 2024-12-09T15:35:45,808 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. 2024-12-09T15:35:45,808 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. 
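The two "Opened ...; next sequenceid=2" entries (for 1588230740 above and for e4cb41ff21765f272c4aaa91a5fdf38d here) each print a ConstantSizeRegionSplitPolicy with a desiredMaxFileSize and a jitterRate. Assuming the policy derives the size as maxFileSize + (long) (maxFileSize * jitterRate), a reconstruction rather than anything stated in this log, both printed sizes check out against the 786432-byte "hbase.hregion.max.filesize" warned about earlier:

    public final class SplitSizeCheck {
      // Worked check under the assumption stated above; both results match
      // the desiredMaxFileSize values printed in the "Opened ..." entries.
      public static void main(String[] args) {
        long max = 786432L; // the deliberately small hbase.hregion.max.filesize
        System.out.println(max + (long) (max * 0.030582427978515625));  // 810483 (hbase:meta)
        System.out.println(max + (long) (max * -0.11815455555915833));  // 693512 (e4cb41ff...)
      }
    }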
2024-12-09T15:35:45,816 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e4cb41ff21765f272c4aaa91a5fdf38d, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,45989,1733758543994
2024-12-09T15:35:45,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994 because future has completed
2024-12-09T15:35:45,821 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38049 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=3651d8ead0f3,45989,1733758543994, table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now.
2024-12-09T15:35:45,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:35:45,849 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-12-09T15:35:45,849 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994 in 226 msec
2024-12-09T15:35:45,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-12-09T15:35:45,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d, ASSIGN in 394 msec
2024-12-09T15:35:45,860 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-09T15:35:45,860 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733758545860"}]},"ts":"1733758545860"}
2024-12-09T15:35:45,864 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta
2024-12-09T15:35:45,866 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-09T15:35:45,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 495 msec
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:46,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:46,294 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T15:35:46,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:46,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:46,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:47,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:47,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:47,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:47,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:48,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:48,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:48,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:48,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:49,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:49,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:49,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:49,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:50,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:35:50,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:50,585 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T15:35:50,586 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-09T15:35:50,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:50,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:51,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:51,274 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T15:35:51,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:51,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:35:51,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
2024-12-09T15:35:51,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:51,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:35:51,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
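
The burst of FsDatasetImpl warnings above is a shutdown race rather than data loss: the HBase-Metrics2-1 thread keeps sampling DataNode metrics while the mini-cluster is tearing the DataNode down, and an executor map the sampler reads ("this.executors") has already been nulled, so each sample trips a NullPointerException (the quoted text is the JDK's helpful-NPE message) that the metrics source catches and logs. A defensive read in the spirit of what the warning implies could look like the following; the names here are illustrative, not the Hadoop source:

    // Illustrative defensive metric read; fsVolumeExecutors stands in for the
    // nulled "this.executors" map named in the warnings above.
    import java.util.Map;
    import java.util.concurrent.ExecutorService;

    final class AsyncDiskServiceMetrics {
      private volatile Map<String, ExecutorService> fsVolumeExecutors; // nulled on shutdown

      int volumeCount() {
        Map<String, ExecutorService> snapshot = fsVolumeExecutors;
        if (snapshot == null) {
          return 0; // service already shut down; report zero instead of throwing
        }
        return snapshot.values().size();
      }
    }
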
2024-12-09T15:35:51,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T15:35:51,689 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-09T15:35:51,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:35:51,690 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-09T15:35:51,690 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-09T15:35:51,690 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-09T15:35:51,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:51,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:52,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:52,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:52,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:52,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:53,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:53,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:53,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:53,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:54,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:54,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:54,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:54,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:55,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:35:55,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
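
The WARNs above cycle over the same four WAL files (two region-server WALs, the meta WAL, and the master's MasterData WAL), with each file reprobed roughly once per second; the timestamps show it directly, e.g. the MasterData WAL at :51,786, :52,787, :53,787, :54,788. That cadence is consistent with a polling loop of the following shape; the one-second interval is read off the log, while the loop itself is a sketch, not the HBase source:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of the polling loop implied by the ~1 s spacing of the WARNs above;
    // IsFileClosedProbe is the illustrative helper sketched earlier.
    final class WalCloseWaiter {
      static void waitUntilClosed(FileSystem fs, Path wal) throws InterruptedException {
        while (!IsFileClosedProbe.isFileClosed(fs, wal)) {
          Thread.sleep(1000L); // retry interval inferred from the log timestamps
        }
      }
    }
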
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:35:55,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T15:35:55,464 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-09T15:35:55,464 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-09T15:35:55,467 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-09T15:35:55,467 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. 2024-12-09T15:35:55,470 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2] 2024-12-09T15:35:55,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on e4cb41ff21765f272c4aaa91a5fdf38d 2024-12-09T15:35:55,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4cb41ff21765f272c4aaa91a5fdf38d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T15:35:55,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/1503978f46064ac6acdf43a08fa1df92 is 1080, key is row0001/info:/1733758555471/Put/seqid=0 2024-12-09T15:35:55,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741837_1013 (size=12509) 2024-12-09T15:35:55,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741837_1013 (size=12509) 2024-12-09T15:35:55,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/1503978f46064ac6acdf43a08fa1df92 2024-12-09T15:35:55,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/1503978f46064ac6acdf43a08fa1df92 as 
2024-12-09T15:35:55,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-09T15:35:55,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:46776 deadline: 1733758565575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994
2024-12-09T15:35:55,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1503978f46064ac6acdf43a08fa1df92, entries=7, sequenceid=11, filesize=12.2 K
2024-12-09T15:35:55,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for e4cb41ff21765f272c4aaa91a5fdf38d in 108ms, sequenceid=11, compaction requested=false
2024-12-09T15:35:55,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4cb41ff21765f272c4aaa91a5fdf38d:
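
The RegionTooBusyException above is back-pressure, not a failure: HRegion.checkResources rejects writes once a region's memstore exceeds a blocking limit, which is the configured flush size times hbase.hregion.memstore.block.multiplier. The 32.0 K limit seen here would match, for instance, a deliberately tiny 8 KB flush size with the default multiplier of 4; that split is an inference from the log, not something it states. The guard is, in spirit:

    // Illustrative back-pressure guard in the spirit of HRegion.checkResources();
    // the 32 KB figure echoes the WARN, the flushSize/multiplier split is assumed.
    final class MemStoreGuard {
      static final long FLUSH_SIZE = 8 * 1024;           // assumed test setting
      static final long BLOCKING_LIMIT = FLUSH_SIZE * 4; // 32 KB, as in the WARN

      static void checkResources(long memStoreSizeBytes) {
        if (memStoreSizeBytes > BLOCKING_LIMIT) {
          // HBase requests an async flush and throws RegionTooBusyException here;
          // IllegalStateException keeps this sketch dependency-free.
          throw new IllegalStateException(
              "Over memstore limit=" + (BLOCKING_LIMIT / 1024) + ".0 K");
        }
      }
    }

The "Finished flush ... in 108ms" line above shows the flush draining the memstore shortly afterwards, after which retried writes go through.
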
2024-12-09T15:35:55,614 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994
2024-12-09T15:35:55,615 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994
2024-12-09T15:35:55,615 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2 because the exception is null or not the one we care about
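
The three client entries above show the async locator's cache policy: on an error it compares the cached location with the failed one and only evicts when the exception implies the region actually moved. A RegionTooBusyException means "same place, try again later", hence "Will not update ... because the exception is null or not the one we care about". A sketch of that decision, with types simplified to stay self-contained; this mirrors the logged behavior, not the exact HBase code:

    // Illustrative cache-eviction decision for a client-side region locator.
    final class LocatorCachePolicy {
      /** Only "region moved / not serving" style errors invalidate the cached
       *  location; busy-region errors keep the cache and simply retry. */
      static boolean shouldEvict(Throwable error) {
        if (error == null) {
          return false;
        }
        String name = error.getClass().getSimpleName();
        return name.equals("NotServingRegionException")
            || name.equals("RegionMovedException");
      }
    }
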
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:35:55,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T15:35:56,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null [stack trace identical to the one above]
2024-12-09T15:35:56,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the one above]
2024-12-09T15:35:56,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null [stack trace identical to the one above]
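[Note: every warning in this stretch has the same shape. A Close-WAL-Writer thread is retrying WAL lease recovery; RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed through reflection (the Method.invoke frames above), and the underlying DFSClient has already been shut down, so DFSClient.checkOpen throws IOException: Filesystem closed, which reflection re-wraps as the logged InvocationTargetException. The minimal, self-contained Java sketch below reproduces only that wrapping behavior; FakeFileSystem and its clientOpen field are hypothetical stand-ins, not HBase or HDFS source.]

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveProbeDemo {

    // Hypothetical stand-in for a DistributedFileSystem whose DFSClient is closed.
    public static class FakeFileSystem {
        private final boolean clientOpen = false; // the client was already shut down

        public boolean isFileClosed(String path) throws IOException {
            if (!clientOpen) {
                // Mirrors the behavior of DFSClient.checkOpen() once the client is closed.
                throw new IOException("Filesystem closed");
            }
            return true;
        }
    }

    public static void main(String[] args) throws Exception {
        FakeFileSystem fs = new FakeFileSystem();
        // Look up and invoke the probe reflectively, as the log's Method.invoke frames show.
        Method isFileClosed = FakeFileSystem.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(fs, "/WALs/some-wal-file");
        } catch (InvocationTargetException e) {
            // Reflection wraps the checked IOException; getMessage() is null,
            // which is why the log line reads "InvocationTargetException: null".
            System.out.println("message: " + e.getMessage());
            System.out.println("wrapped cause: " + e.getCause());
        }
    }
}

[Running the sketch prints "message: null" and "wrapped cause: java.io.IOException: Filesystem closed", matching the "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" pairs logged above.]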
[The identical WARN and "Filesystem closed" stack trace then recur for the same four WAL files in the same order, roughly once per second per file, from 2024-12-09T15:35:56,834 through 2024-12-09T15:36:03,056; the intermediate repetitions are elided here.]
2024-12-09T15:36:03,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:03,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:03,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:04,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:04,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:04,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:04,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:05,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:05,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
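The repeated WARN entries above all follow one pattern: the WAL close path polls DistributedFileSystem.isFileClosed through reflection until the NameNode reports the file closed, and every poll fails here because the test has already shut the DFSClient down ("Filesystem closed"). A minimal sketch of that polling pattern follows; this is an illustration, not the actual RecoverLeaseFSUtils code, and the class and method names other than isFileClosed are assumptions.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hedged sketch: poll isFileClosed reflectively (older Hadoop versions may
// not expose the method), retrying until the file is closed or we time out.
final class LeaseRecoveryPoller {
  static boolean waitUntilClosed(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    final Method isFileClosed;
    try {
      isFileClosed = DistributedFileSystem.class.getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // method absent on this Hadoop version; caller falls back to recoverLease polling
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(dfs, wal)) {
          return true; // lease released, file is closed
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // Matches the WARNs above: the wrapped cause here is the
        // IOException("Filesystem closed") thrown once the DFSClient is shut down.
        System.err.println("Failed invocation for " + wal + ": " + e.getCause());
      }
      Thread.sleep(1000); // roughly the one-second retry cadence visible in the log
    }
    return false;
  }
}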
2024-12-09T15:36:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:05,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4cb41ff21765f272c4aaa91a5fdf38d 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-09T15:36:05,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/f7f4a5fde02147ba9a778791c8a5513f is 1080, key is row0008/info:/1733758555489/Put/seqid=0
2024-12-09T15:36:05,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741838_1014 (size=29761)
2024-12-09T15:36:05,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741838_1014 (size=29761)
2024-12-09T15:36:05,723 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/f7f4a5fde02147ba9a778791c8a5513f
2024-12-09T15:36:05,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/f7f4a5fde02147ba9a778791c8a5513f as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/f7f4a5fde02147ba9a778791c8a5513f
2024-12-09T15:36:05,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/f7f4a5fde02147ba9a778791c8a5513f, entries=23, sequenceid=37, filesize=29.1 K
2024-12-09T15:36:05,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for e4cb41ff21765f272c4aaa91a5fdf38d in 125ms, sequenceid=37, compaction requested=false
2024-12-09T15:36:05,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4cb41ff21765f272c4aaa91a5fdf38d:
2024-12-09T15:36:05,790 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-12-09T15:36:05,790 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:05,791 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/f7f4a5fde02147ba9a778791c8a5513f because midkey is the same as first or last row
2024-12-09T15:36:05,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed (stack trace identical to the 15:36:02,055 occurrence above)
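The flush above follows the usual two-step commit: the new HFile is written under the region's .tmp directory and only then moved into the store's info directory (the "Committing ... as ..." line), so readers never observe a half-written file. A minimal sketch of that pattern against the plain Hadoop FileSystem API; the class and method names are illustrative, not the HBase implementation.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hedged sketch of the write-to-tmp-then-rename commit seen in the
// HRegionFileSystem log lines above.
final class TmpCommit {
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // rename is atomic on HDFS, so the finished store file appears all at once
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}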
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:05,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:06,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:06,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:06,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:06,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:07,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:07,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-09T15:36:07,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:07,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4cb41ff21765f272c4aaa91a5fdf38d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T15:36:07,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/a472d9c847e8478abf9c37d6288403cb is 1080, key is row0031/info:/1733758565669/Put/seqid=0
2024-12-09T15:36:07,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741839_1015 (size=12509)
2024-12-09T15:36:07,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741839_1015 (size=12509)
2024-12-09T15:36:07,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/a472d9c847e8478abf9c37d6288403cb
2024-12-09T15:36:07,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/a472d9c847e8478abf9c37d6288403cb as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/a472d9c847e8478abf9c37d6288403cb
2024-12-09T15:36:07,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/a472d9c847e8478abf9c37d6288403cb, entries=7, sequenceid=47, filesize=12.2 K
2024-12-09T15:36:07,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for e4cb41ff21765f272c4aaa91a5fdf38d in 49ms, sequenceid=47, compaction requested=true
2024-12-09T15:36:07,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4cb41ff21765f272c4aaa91a5fdf38d:
2024-12-09T15:36:07,743 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K
2024-12-09T15:36:07,743 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:07,743 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/f7f4a5fde02147ba9a778791c8a5513f because midkey is the same as first or last row
2024-12-09T15:36:07,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on e4cb41ff21765f272c4aaa91a5fdf38d
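The split-policy lines recur after every flush: the policy finds the region big enough (sumSize exceeds sizeToCheck), but StoreUtils refuses to produce a split point because the biggest store file's midkey equals its first or last row, which would leave one daughter region empty. A rough sketch of that decision; the class, method, and parameter names are assumptions, not the actual HBase classes.

// Hedged sketch of the split decision visible in the log above.
final class SplitCheck {
  static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes,
      byte[] midkey, byte[] firstRow, byte[] lastRow) {
    if (sumStoreSizeBytes <= sizeToCheckBytes) {
      return false; // region not big enough yet
    }
    // Mirrors "cannot split ... because midkey is the same as first or last row":
    // splitting at such a midkey would put every row into a single daughter.
    return !java.util.Arrays.equals(midkey, firstRow)
        && !java.util.Arrays.equals(midkey, lastRow);
  }
}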
2024-12-09T15:36:07,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4cb41ff21765f272c4aaa91a5fdf38d:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T15:36:07,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:36:07,747 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4cb41ff21765f272c4aaa91a5fdf38d 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-09T15:36:07,750 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T15:36:07,765 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T15:36:07,765 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1541): e4cb41ff21765f272c4aaa91a5fdf38d/info is initiating minor compaction (all files)
2024-12-09T15:36:07,765 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e4cb41ff21765f272c4aaa91a5fdf38d/info in TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.
2024-12-09T15:36:07,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/1c4cc509a1724c44a636672b9b5f25c7 is 1080, key is row0038/info:/1733758567695/Put/seqid=0
2024-12-09T15:36:07,767 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1503978f46064ac6acdf43a08fa1df92, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/f7f4a5fde02147ba9a778791c8a5513f, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/a472d9c847e8478abf9c37d6288403cb] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp, totalSize=53.5 K
2024-12-09T15:36:07,767 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1503978f46064ac6acdf43a08fa1df92, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733758555471
2024-12-09T15:36:07,768 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting f7f4a5fde02147ba9a778791c8a5513f, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733758555489
2024-12-09T15:36:07,770 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting a472d9c847e8478abf9c37d6288403cb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733758565669
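The ExploringCompactionPolicy line records that all three store files fit the selection ratio. The core test, roughly: a candidate set of files is "in ratio" when no single file is larger than the sum of the other files times the configured ratio. A simplified sketch follows; the real policy also enumerates contiguous permutations and applies min/max file counts, and the class and method names here are assumptions.

import java.util.List;

// Hedged sketch of the "in ratio" test behind
// "selected 3 files ... with 1 in ratio"; simplified from the real policy.
final class InRatio {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // a file is "in ratio" only if it is no bigger than
      // ratio * (sum of all the other files in the candidate set)
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }
}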
2024-12-09T15:36:07,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741840_1016 (size=24376)
2024-12-09T15:36:07,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed (stack trace identical to the 15:36:02,055 occurrence above)
2024-12-09T15:36:07,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741840_1016 (size=24376)
2024-12-09T15:36:07,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=68 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/1c4cc509a1724c44a636672b9b5f25c7
2024-12-09T15:36:07,824 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4cb41ff21765f272c4aaa91a5fdf38d#info#compaction#61 average throughput is 7.59 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
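The throughput-controller line shows the compaction ran under a 50 MB/second cap without ever sleeping. The underlying idea: track bytes written, and when the observed average rate exceeds the configured limit, sleep just long enough to fall back under it. A minimal sketch in the spirit of PressureAwareThroughputController; all names and the exact accounting here are assumptions.

// Hedged sketch of compaction write-rate limiting; not the HBase class itself.
final class ThroughputLimiter {
  private final double maxBytesPerSec;
  private long bytesWritten;
  private final long startNanos = System.nanoTime();

  ThroughputLimiter(double maxBytesPerSec) {
    this.maxBytesPerSec = maxBytesPerSec;
  }

  // Called after each chunk of compaction output; sleeps only when the
  // average rate so far exceeds the limit ("slept 0 time(s)" means it never did).
  void control(long newBytes) throws InterruptedException {
    bytesWritten += newBytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double minSecondsNeeded = bytesWritten / maxBytesPerSec;
    if (minSecondsNeeded > elapsedSec) {
      Thread.sleep((long) ((minSecondsNeeded - elapsedSec) * 1000));
    }
  }
}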
2024-12-09T15:36:07,825 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/961508b230224d36af3ba64f5ddd9a57 is 1080, key is row0001/info:/1733758555471/Put/seqid=0
2024-12-09T15:36:07,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/1c4cc509a1724c44a636672b9b5f25c7 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1c4cc509a1724c44a636672b9b5f25c7
2024-12-09T15:36:07,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1c4cc509a1724c44a636672b9b5f25c7, entries=18, sequenceid=68, filesize=23.8 K
2024-12-09T15:36:07,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=9.46 KB/9684 for e4cb41ff21765f272c4aaa91a5fdf38d in 94ms, sequenceid=68, compaction requested=false
2024-12-09T15:36:07,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4cb41ff21765f272c4aaa91a5fdf38d:
2024-12-09T15:36:07,841 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.3 K, sizeToCheck=16.0 K
2024-12-09T15:36:07,841 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:07,841 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/f7f4a5fde02147ba9a778791c8a5513f because midkey is the same as first or last row
2024-12-09T15:36:07,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:07,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741841_1017 (size=44978)
2024-12-09T15:36:07,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741841_1017 (size=44978)
2024-12-09T15:36:07,867 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/961508b230224d36af3ba64f5ddd9a57 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57
2024-12-09T15:36:07,875 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e4cb41ff21765f272c4aaa91a5fdf38d/info of e4cb41ff21765f272c4aaa91a5fdf38d into 961508b230224d36af3ba64f5ddd9a57(size=43.9 K), total size for store is 67.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e4cb41ff21765f272c4aaa91a5fdf38d:
2024-12-09T15:36:07,875 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., storeName=e4cb41ff21765f272c4aaa91a5fdf38d/info, priority=13, startTime=1733758567743; duration=0sec
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57 because midkey is the same as first or last row
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57 because midkey is the same as first or last row
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:07,875 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57 because midkey is the same as first or last row
2024-12-09T15:36:07,876 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:36:07,876 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4cb41ff21765f272c4aaa91a5fdf38d:info
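The split-policy triplet above repeats after every flush and compaction: ConstantSizeRegionSplitPolicy finds the store big enough (sumSize=67.7 K vs sizeToCheck=16.0 K), but StoreUtils refuses because the single store file's midkey equals its first or last row, so there is no usable split point yet. The 16.0 K threshold is consistent with IncreasingToUpperBoundRegionSplitPolicy's rule: the limit grows with the cube of the number of regions the table has on this server, starting from twice the flush size and capped at the configured max file size. A sketch, assuming this test runs with an 8 KB flush size (an inference, not stated in the log):

    public class SplitSizeSketch {
        // Threshold used by the "increasing to upper bound" policy:
        // min(maxFileSize, 2 * flushSize * regions^3), with regions counted
        // for the same table on this server.
        static long sizeToCheck(int regionsWithCommonTable, long flushSize, long maxFileSize) {
            if (regionsWithCommonTable == 0) return maxFileSize;
            long cubed = (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
            return Math.min(maxFileSize, 2 * flushSize * cubed);
        }

        public static void main(String[] args) {
            // One region and an 8 KB flush size give 16384 bytes,
            // matching sizeToCheck=16.0 K in the log above.
            System.out.println(sizeToCheck(1, 8 * 1024, 10L * 1024 * 1024 * 1024));
        }
    }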
2024-12-09T15:36:08,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:08,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:08,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:08,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:09,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:09,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:09,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:09,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4cb41ff21765f272c4aaa91a5fdf38d 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-12-09T15:36:09,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:09,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/5621de7032384f729acaaa562738e6b7 is 1080, key is row0056/info:/1733758567748/Put/seqid=0
2024-12-09T15:36:09,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741842_1018 (size=15740)
2024-12-09T15:36:09,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741842_1018 (size=15740)
2024-12-09T15:36:09,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/5621de7032384f729acaaa562738e6b7
2024-12-09T15:36:09,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/5621de7032384f729acaaa562738e6b7 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/5621de7032384f729acaaa562738e6b7
2024-12-09T15:36:09,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/5621de7032384f729acaaa562738e6b7, entries=10, sequenceid=82, filesize=15.4 K
2024-12-09T15:36:09,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=13.66 KB/13988 for e4cb41ff21765f272c4aaa91a5fdf38d in 33ms, sequenceid=82, compaction requested=true
2024-12-09T15:36:09,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4cb41ff21765f272c4aaa91a5fdf38d:
2024-12-09T15:36:09,829 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K
2024-12-09T15:36:09,829 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:09,829 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57 because midkey is the same as first or last row
2024-12-09T15:36:09,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4cb41ff21765f272c4aaa91a5fdf38d:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T15:36:09,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
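The flush sequence above always writes the new HFile under the region's .tmp directory first and only then "commits" it by moving it into the store directory, so concurrent readers never observe a half-written file. A generic local-filesystem sketch of the same write-then-rename idea (HBase performs the HDFS equivalent via HRegionFileSystem; the helper names below are illustrative):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class CommitSketch {
        // Move a fully written temp file into the store directory in one step,
        // so it becomes visible to readers all at once.
        static Path commit(Path tmpFile, Path storeDir) throws IOException {
            Path target = storeDir.resolve(tmpFile.getFileName());
            return Files.move(tmpFile, target, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path dir = Files.createTempDirectory("store");
            Path tmp = Files.createTempFile(dir, "hfile", ".tmp");
            Path storeDir = Files.createDirectory(dir.resolve("info"));
            System.out.println(commit(tmp, storeDir));
        }
    }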
2024-12-09T15:36:09,830 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T15:36:09,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:09,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4cb41ff21765f272c4aaa91a5fdf38d 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-12-09T15:36:09,831 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T15:36:09,831 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1541): e4cb41ff21765f272c4aaa91a5fdf38d/info is initiating minor compaction (all files)
2024-12-09T15:36:09,831 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e4cb41ff21765f272c4aaa91a5fdf38d/info in TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.
2024-12-09T15:36:09,831 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1c4cc509a1724c44a636672b9b5f25c7, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/5621de7032384f729acaaa562738e6b7] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp, totalSize=83.1 K
2024-12-09T15:36:09,833 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 961508b230224d36af3ba64f5ddd9a57, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733758555471
2024-12-09T15:36:09,834 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c4cc509a1724c44a636672b9b5f25c7, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=68, earliestPutTs=1733758567695
2024-12-09T15:36:09,834 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5621de7032384f729acaaa562738e6b7, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733758567748
2024-12-09T15:36:09,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/7cac9a1e9bd6411e82edfa02d2cfd64e is 1080, key is row0066/info:/1733758569797/Put/seqid=0
2024-12-09T15:36:09,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:09,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-09T15:36:09,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:46776 deadline: 1733758579861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994
2024-12-09T15:36:09,863 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T15:36:09,863 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T15:36:09,863 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2 because the exception is null or not the one we care about
2024-12-09T15:36:09,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741843_1019 (size=20064)
2024-12-09T15:36:09,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741843_1019 (size=20064)
2024-12-09T15:36:09,870 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4cb41ff21765f272c4aaa91a5fdf38d#info#compaction#64 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T15:36:09,871 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/8cd328a9ef254adaaa3d4865125c98bc is 1080, key is row0001/info:/1733758555471/Put/seqid=0
2024-12-09T15:36:09,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/7cac9a1e9bd6411e82edfa02d2cfd64e
2024-12-09T15:36:09,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741844_1020 (size=75378)
2024-12-09T15:36:09,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741844_1020 (size=75378)
2024-12-09T15:36:09,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/7cac9a1e9bd6411e82edfa02d2cfd64e as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/7cac9a1e9bd6411e82edfa02d2cfd64e
2024-12-09T15:36:09,925 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/8cd328a9ef254adaaa3d4865125c98bc as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/8cd328a9ef254adaaa3d4865125c98bc
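The RegionTooBusyException above is back-pressure rather than a failure: HRegion.checkResources rejects the put once the memstore exceeds a blocking multiple of the flush size (32.0 K here, consistent with an 8 KB flush size and the default block multiplier of 4, both assumptions about this test's configuration), and the async client simply retries after the queued flushes drain. A sketch of the guard:

    import java.io.IOException;

    public class MemstoreGuardSketch {
        // Reject a write when the memstore has grown past
        // flushSize * blockingMultiplier; flushes are already queued,
        // so the caller is expected to retry shortly.
        static void checkResources(long memstoreDataSize, long flushSize, long blockingMultiplier)
                throws IOException {
            long limit = flushSize * blockingMultiplier;
            if (memstoreDataSize > limit) {
                // HBase throws RegionTooBusyException here.
                throw new IOException("Over memstore limit=" + limit);
            }
        }

        public static void main(String[] args) {
            try {
                checkResources(40_000, 8 * 1024, 4); // 8 KB * 4 = 32 KB limit
            } catch (IOException e) {
                System.out.println(e.getMessage());
            }
        }
    }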
2024-12-09T15:36:09,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/7cac9a1e9bd6411e82edfa02d2cfd64e, entries=14, sequenceid=99, filesize=19.6 K
2024-12-09T15:36:09,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for e4cb41ff21765f272c4aaa91a5fdf38d in 100ms, sequenceid=99, compaction requested=false
2024-12-09T15:36:09,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4cb41ff21765f272c4aaa91a5fdf38d:
2024-12-09T15:36:09,930 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K
2024-12-09T15:36:09,930 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:09,930 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57 because midkey is the same as first or last row
2024-12-09T15:36:09,938 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e4cb41ff21765f272c4aaa91a5fdf38d/info of e4cb41ff21765f272c4aaa91a5fdf38d into 8cd328a9ef254adaaa3d4865125c98bc(size=73.6 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T15:36:09,938 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e4cb41ff21765f272c4aaa91a5fdf38d:
2024-12-09T15:36:09,938 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., storeName=e4cb41ff21765f272c4aaa91a5fdf38d/info, priority=13, startTime=1733758569829; duration=0sec
2024-12-09T15:36:09,938 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K
2024-12-09T15:36:09,938 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:09,939 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K
2024-12-09T15:36:09,939 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:09,939 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K
2024-12-09T15:36:09,939 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T15:36:09,940 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:36:09,940 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:36:09,940 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4cb41ff21765f272c4aaa91a5fdf38d:info
2024-12-09T15:36:09,941 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38049 {}] assignment.AssignmentManager(1363): Split request from 3651d8ead0f3,45989,1733758543994, parent={ENCODED => e4cb41ff21765f272c4aaa91a5fdf38d, NAME => 'TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-12-09T15:36:09,948 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38049 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=3651d8ead0f3,45989,1733758543994
2024-12-09T15:36:09,953 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38049 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e4cb41ff21765f272c4aaa91a5fdf38d, daughterA=04c086fc533fb33cebb40916dc3df491, daughterB=68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:09,955 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e4cb41ff21765f272c4aaa91a5fdf38d, daughterA=04c086fc533fb33cebb40916dc3df491, daughterB=68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:09,955 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e4cb41ff21765f272c4aaa91a5fdf38d, daughterA=04c086fc533fb33cebb40916dc3df491, daughterB=68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:09,955 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e4cb41ff21765f272c4aaa91a5fdf38d, daughterA=04c086fc533fb33cebb40916dc3df491, daughterB=68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:09,963 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d, UNASSIGN}]
2024-12-09T15:36:09,964 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d, UNASSIGN
2024-12-09T15:36:09,966 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=e4cb41ff21765f272c4aaa91a5fdf38d, regionState=CLOSING, regionLocation=3651d8ead0f3,45989,1733758543994
2024-12-09T15:36:09,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d, UNASSIGN because future has completed
2024-12-09T15:36:09,975 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-12-09T15:36:09,975 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994}]
2024-12-09T15:36:10,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:10,141 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:10,141 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true
2024-12-09T15:36:10,142 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing e4cb41ff21765f272c4aaa91a5fdf38d, disabling compactions & flushes
2024-12-09T15:36:10,142 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.
2024-12-09T15:36:10,142 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.
2024-12-09T15:36:10,142 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. after waiting 0 ms
2024-12-09T15:36:10,142 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.
2024-12-09T15:36:10,142 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing e4cb41ff21765f272c4aaa91a5fdf38d 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-12-09T15:36:10,160 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/198372afb8c64f48b36814b1e29e8a53 is 1080, key is row0080/info:/1733758569832/Put/seqid=0
2024-12-09T15:36:10,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741845_1021 (size=21141)
2024-12-09T15:36:10,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741845_1021 (size=21141)
2024-12-09T15:36:10,191 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/198372afb8c64f48b36814b1e29e8a53
2024-12-09T15:36:10,213 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/.tmp/info/198372afb8c64f48b36814b1e29e8a53 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/198372afb8c64f48b36814b1e29e8a53
2024-12-09T15:36:10,224 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/198372afb8c64f48b36814b1e29e8a53, entries=15, sequenceid=118, filesize=20.6 K
2024-12-09T15:36:10,239 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for e4cb41ff21765f272c4aaa91a5fdf38d in 97ms, sequenceid=118, compaction requested=true
2024-12-09T15:36:10,247 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1503978f46064ac6acdf43a08fa1df92, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/f7f4a5fde02147ba9a778791c8a5513f, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/a472d9c847e8478abf9c37d6288403cb, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1c4cc509a1724c44a636672b9b5f25c7, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/5621de7032384f729acaaa562738e6b7] to archive
2024-12-09T15:36:10,254 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
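As the store closes, its compacted-away files are not deleted outright; HFileArchiver moves each one into a parallel layout under archive/data/..., preserving the table/region/family path, as the moves that follow show. A sketch of that path mapping (the helper name is illustrative):

    public class ArchivePathSketch {
        // Map a store-file path under <root>/data/... to its archive location
        // under <root>/archive/data/... (literal string replace, no regex).
        static String toArchive(String rootDir, String storeFilePath) {
            return storeFilePath.replace(rootDir + "/data/", rootDir + "/archive/data/");
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7";
            System.out.println(toArchive(root,
                root + "/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1503978f46064ac6acdf43a08fa1df92"));
        }
    }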
2024-12-09T15:36:10,270 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1503978f46064ac6acdf43a08fa1df92 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1503978f46064ac6acdf43a08fa1df92
2024-12-09T15:36:10,273 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/f7f4a5fde02147ba9a778791c8a5513f to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/f7f4a5fde02147ba9a778791c8a5513f
2024-12-09T15:36:10,277 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/961508b230224d36af3ba64f5ddd9a57
2024-12-09T15:36:10,278 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/a472d9c847e8478abf9c37d6288403cb to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/a472d9c847e8478abf9c37d6288403cb
2024-12-09T15:36:10,280 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1c4cc509a1724c44a636672b9b5f25c7 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/1c4cc509a1724c44a636672b9b5f25c7
2024-12-09T15:36:10,281 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/5621de7032384f729acaaa562738e6b7 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/5621de7032384f729acaaa562738e6b7
2024-12-09T15:36:10,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:10,331 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=1
2024-12-09T15:36:10,333 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.
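The "Failed invocation ... Filesystem closed" warning above is a shutdown-ordering symptom rather than data loss: a Close-WAL-Writer thread is still probing WAL files of the earlier mini cluster (port 38871) after that cluster's FileSystem handle was closed. Hadoop's FileSystem.get caches one instance per scheme, authority and user, so closing the instance anywhere invalidates every other holder. A minimal sketch of the pitfall; the class is illustrative and the URI echoes the log:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SharedFsCachePitfall {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            URI uri = URI.create("hdfs://localhost:38871");
            FileSystem a = FileSystem.get(uri, conf); // cached instance
            FileSystem b = FileSystem.get(uri, conf); // the very same object as 'a'
            a.close();                                // e.g. a cluster shutdown hook
            // Every later call through 'b' fails DFSClient.checkOpen, surfacing as
            // "java.io.IOException: Filesystem closed", as in the trace above.
            b.exists(new Path("/user/jenkins"));
        }
    }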
2024-12-09T15:36:10,333 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for e4cb41ff21765f272c4aaa91a5fdf38d: Waiting for close lock at 1733758570142Running coprocessor pre-close hooks at 1733758570142Disabling compacts and flushes for region at 1733758570142Disabling writes for close at 1733758570142Obtaining lock to block concurrent updates at 1733758570142Preparing flush snapshotting stores in e4cb41ff21765f272c4aaa91a5fdf38d at 1733758570142Finished memstore snapshotting TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., syncing WAL and waiting on mvcc, flushsize=dataSize=16140, getHeapSize=17520, getOffHeapSize=0, getCellsCount=15 at 1733758570143 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. at 1733758570148 (+5 ms)Flushing e4cb41ff21765f272c4aaa91a5fdf38d/info: creating writer at 1733758570148Flushing e4cb41ff21765f272c4aaa91a5fdf38d/info: appending metadata at 1733758570160 (+12 ms)Flushing e4cb41ff21765f272c4aaa91a5fdf38d/info: closing flushed file at 1733758570160Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b1f27f2: reopening flushed file at 1733758570209 (+49 ms)Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for e4cb41ff21765f272c4aaa91a5fdf38d in 97ms, sequenceid=118, compaction requested=true at 1733758570239 (+30 ms)Writing region close event to WAL at 1733758570295 (+56 ms)Running coprocessor post-close hooks at 1733758570333 (+38 ms)Closed at 1733758570333
2024-12-09T15:36:10,339 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:10,340 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=e4cb41ff21765f272c4aaa91a5fdf38d, regionState=CLOSED
2024-12-09T15:36:10,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994 because future has completed
2024-12-09T15:36:10,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8
2024-12-09T15:36:10,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure e4cb41ff21765f272c4aaa91a5fdf38d, server=3651d8ead0f3,45989,1733758543994 in 405 msec
2024-12-09T15:36:10,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-12-09T15:36:10,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e4cb41ff21765f272c4aaa91a5fdf38d, UNASSIGN in 430 msec
2024-12-09T15:36:10,489 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:36:10,494 INFO [PEWorker-2 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=e4cb41ff21765f272c4aaa91a5fdf38d, threads=3
2024-12-09T15:36:10,500 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/198372afb8c64f48b36814b1e29e8a53 for region: e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:10,506 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/7cac9a1e9bd6411e82edfa02d2cfd64e for region: e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:10,507 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/8cd328a9ef254adaaa3d4865125c98bc for region: e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:10,535 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/198372afb8c64f48b36814b1e29e8a53, top=true
2024-12-09T15:36:10,553 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/7cac9a1e9bd6411e82edfa02d2cfd64e, top=true
2024-12-09T15:36:10,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741846_1022 (size=27)
2024-12-09T15:36:10,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741846_1022 (size=27)
2024-12-09T15:36:10,560 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-198372afb8c64f48b36814b1e29e8a53 for child: 68461f97512168f0a84fb8a3ee8a16d8, parent: e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:10,560 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/198372afb8c64f48b36814b1e29e8a53 for region: e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:10,587 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-7cac9a1e9bd6411e82edfa02d2cfd64e for child: 68461f97512168f0a84fb8a3ee8a16d8, parent: e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:10,588 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/7cac9a1e9bd6411e82edfa02d2cfd64e for region: e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:10,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:10,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:11,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741847_1023 (size=27)
2024-12-09T15:36:11,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741847_1023 (size=27)
2024-12-09T15:36:11,016 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/8cd328a9ef254adaaa3d4865125c98bc for region: e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:36:11,018 DEBUG [PEWorker-2 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region e4cb41ff21765f272c4aaa91a5fdf38d Daughter A: [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d] storefiles, Daughter B: [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-198372afb8c64f48b36814b1e29e8a53, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-7cac9a1e9bd6411e82edfa02d2cfd64e, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d] storefiles.
2024-12-09T15:36:11,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:11,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741848_1024 (size=71)
2024-12-09T15:36:11,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741848_1024 (size=71)
2024-12-09T15:36:11,123 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:36:11,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741849_1025 (size=71)
2024-12-09T15:36:11,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741849_1025 (size=71)
2024-12-09T15:36:11,208 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:36:11,232 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1
2024-12-09T15:36:11,246 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1
2024-12-09T15:36:11,252 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733758571251"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733758571251"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733758571251"}]},"ts":"1733758571251"}
2024-12-09T15:36:11,252 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733758571251"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733758571251"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733758571251"}]},"ts":"1733758571251"}
2024-12-09T15:36:11,252 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733758571251"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733758571251"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733758571251"}]},"ts":"1733758571251"}
2024-12-09T15:36:11,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=04c086fc533fb33cebb40916dc3df491, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68461f97512168f0a84fb8a3ee8a16d8, ASSIGN}]
2024-12-09T15:36:11,282 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=04c086fc533fb33cebb40916dc3df491, ASSIGN
2024-12-09T15:36:11,282 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68461f97512168f0a84fb8a3ee8a16d8, ASSIGN
2024-12-09T15:36:11,289 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=04c086fc533fb33cebb40916dc3df491, ASSIGN; state=SPLITTING_NEW, location=3651d8ead0f3,45989,1733758543994; forceNewPlan=false, retain=false
2024-12-09T15:36:11,289 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68461f97512168f0a84fb8a3ee8a16d8, ASSIGN; state=SPLITTING_NEW, location=3651d8ead0f3,45989,1733758543994; forceNewPlan=false, retain=false
2024-12-09T15:36:11,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:11,441 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=68461f97512168f0a84fb8a3ee8a16d8, regionState=OPENING, regionLocation=3651d8ead0f3,45989,1733758543994
2024-12-09T15:36:11,442 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=04c086fc533fb33cebb40916dc3df491, regionState=OPENING, regionLocation=3651d8ead0f3,45989,1733758543994
2024-12-09T15:36:11,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68461f97512168f0a84fb8a3ee8a16d8, ASSIGN because future has completed
2024-12-09T15:36:11,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994}]
2024-12-09T15:36:11,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=04c086fc533fb33cebb40916dc3df491, ASSIGN because future has completed
2024-12-09T15:36:11,459 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04c086fc533fb33cebb40916dc3df491, server=3651d8ead0f3,45989,1733758543994}]
2024-12-09T15:36:11,608 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
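The split bookkeeping above shows the two storefile naming schemes daughters use instead of copying data. Daughter A holds a reference file named <hfile>.<parentEncodedRegion> (8cd328....e4cb...), while files that fall wholly above the split point reach daughter B through HFileLinks named <table>=<parentEncodedRegion>-<hfile>. A sketch of pulling those names apart; the helper is illustrative and assumes hex file and region names with no embedded separators:

    public class SplitFileNames {
        // e.g. "8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d"
        static String[] parseReference(String name) {
            int dot = name.lastIndexOf('.');
            return new String[] { name.substring(0, dot),      // parent hfile
                                  name.substring(dot + 1) };   // parent encoded region
        }

        // e.g. "TestLogRolling-testLogRolling=e4cb41ff...8d-198372af...53"
        static String[] parseHFileLink(String name) {
            int eq = name.indexOf('=');
            int dash = name.lastIndexOf('-');
            return new String[] { name.substring(0, eq),        // table
                                  name.substring(eq + 1, dash), // parent encoded region
                                  name.substring(dash + 1) };   // hfile
        }
    }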
2024-12-09T15:36:11,608 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 68461f97512168f0a84fb8a3ee8a16d8, NAME => 'TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.', STARTKEY => 'row0062', ENDKEY => ''}
2024-12-09T15:36:11,609 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,609 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:36:11,609 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,609 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,611 INFO [StoreOpener-68461f97512168f0a84fb8a3ee8a16d8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,612 INFO [StoreOpener-68461f97512168f0a84fb8a3ee8a16d8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 68461f97512168f0a84fb8a3ee8a16d8 columnFamilyName info
2024-12-09T15:36:11,612 DEBUG [StoreOpener-68461f97512168f0a84fb8a3ee8a16d8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:36:11,634 DEBUG [StoreOpener-68461f97512168f0a84fb8a3ee8a16d8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d->hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/8cd328a9ef254adaaa3d4865125c98bc-top
2024-12-09T15:36:11,651 DEBUG [StoreOpener-68461f97512168f0a84fb8a3ee8a16d8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-198372afb8c64f48b36814b1e29e8a53
2024-12-09T15:36:11,655 DEBUG [StoreOpener-68461f97512168f0a84fb8a3ee8a16d8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-7cac9a1e9bd6411e82edfa02d2cfd64e
2024-12-09T15:36:11,656 INFO [StoreOpener-68461f97512168f0a84fb8a3ee8a16d8-1 {}] regionserver.HStore(327): Store=68461f97512168f0a84fb8a3ee8a16d8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:36:11,656 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,657 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,658 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,660 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,660 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,664 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,665 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 68461f97512168f0a84fb8a3ee8a16d8; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851763, jitterRate=0.08307285606861115}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-09T15:36:11,665 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:36:11,666 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 68461f97512168f0a84fb8a3ee8a16d8: Running coprocessor pre-open hook at 1733758571609Writing region info on filesystem at 1733758571609Initializing all the Stores at 1733758571611 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758571611Cleaning up temporary data from old regions at 1733758571660 (+49 ms)Running coprocessor post-open hooks at 1733758571665 (+5 ms)Region opened successfully at 1733758571666 (+1 ms)
2024-12-09T15:36:11,667 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., pid=12, masterSystemTime=1733758571604
2024-12-09T15:36:11,668 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 68461f97512168f0a84fb8a3ee8a16d8:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T15:36:11,668 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:36:11,668 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T15:36:11,670 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:36:11,670 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1541): 68461f97512168f0a84fb8a3ee8a16d8/info is initiating minor compaction (all files)
2024-12-09T15:36:11,671 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 68461f97512168f0a84fb8a3ee8a16d8/info in TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
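The storefile loaded with a "-top" suffix above is a half-file reference: the daughter does not copy the parent's HFile at split time, it scans the slice on its side of the split row (row0062). A simplified sketch of the row test such a reference implies; HBase's Reference and HalfStoreFileReader carry more detail than this:

    import java.util.Arrays;

    public class HalfReadSketch {
        enum Half { TOP, BOTTOM }

        // TOP covers [splitRow, end); BOTTOM covers [start, splitRow).
        static boolean inHalf(byte[] row, byte[] splitRow, Half half) {
            int cmp = Arrays.compare(row, splitRow); // lexicographic, like HBase row keys
            return half == Half.TOP ? cmp >= 0 : cmp < 0;
        }
    }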
2024-12-09T15:36:11,671 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d->hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/8cd328a9ef254adaaa3d4865125c98bc-top, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-7cac9a1e9bd6411e82edfa02d2cfd64e, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-198372afb8c64f48b36814b1e29e8a53] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp, totalSize=113.9 K
2024-12-09T15:36:11,671 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:36:11,671 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:36:11,671 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733758555471
2024-12-09T15:36:11,672 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
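The "Selecting compaction from 3 store files" decision a few records up is governed by the ratio printed in the CompactionConfiguration line (ratio 1.200000, minFilesToCompact:3, maxFilesToCompact:10): a candidate set is in ratio only if no single file is larger than the combined size of the others times the ratio. A simplified sketch of that test; HBase's ExploringCompactionPolicy additionally searches over subsets, which is omitted here:

    import java.util.List;

    public class RatioCheckSketch {
        static boolean filesInRatio(List<Long> sizes, double ratio) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            for (long size : sizes) {
                if (size > (total - size) * ratio) {
                    return false; // one file dominates; this selection is rejected
                }
            }
            return true;
        }
    }

For the three files above (73.6 K, 19.6 K, 20.6 K) the big half-file would fail this test on its own, but the HStore(1527) record shows a freshly split daughter overrides the priority and compacts all files anyway, so the parent's data can be rewritten and the reference dropped.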
2024-12-09T15:36:11,672 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=68461f97512168f0a84fb8a3ee8a16d8, regionState=OPEN, openSeqNum=122, regionLocation=3651d8ead0f3,45989,1733758543994
2024-12-09T15:36:11,672 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 04c086fc533fb33cebb40916dc3df491, NAME => 'TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.', STARTKEY => '', ENDKEY => 'row0062'}
2024-12-09T15:36:11,672 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,672 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:36:11,673 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,673 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,674 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-7cac9a1e9bd6411e82edfa02d2cfd64e, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733758569797
2024-12-09T15:36:11,674 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 1588230740
2024-12-09T15:36:11,674 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all.
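The FlushAllLargeStoresPolicy record above states its rule directly: flush only the column families whose memstores exceed a size lower bound, and when no family qualifies, as with this hbase:meta flush, fall back to flushing all of them (the next record shows all 4/4 families being flushed). A sketch of that decision; the method and map are illustrative, not HBase's API:

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class FlushPolicySketch {
        static List<String> familiesToFlush(Map<String, Long> memstoreSizes, long lowerBound) {
            List<String> big = memstoreSizes.entrySet().stream()
                    .filter(e -> e.getValue() > lowerBound)
                    .map(Map.Entry::getKey)
                    .collect(Collectors.toList());
            // "Since none of the CFs were above the size, flushing all."
            return big.isEmpty() ? List.copyOf(memstoreSizes.keySet()) : big;
        }
    }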
2024-12-09T15:36:11,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB
2024-12-09T15:36:11,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 because future has completed
2024-12-09T15:36:11,675 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-198372afb8c64f48b36814b1e29e8a53, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733758569832
2024-12-09T15:36:11,680 INFO [StoreOpener-04c086fc533fb33cebb40916dc3df491-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,687 INFO [StoreOpener-04c086fc533fb33cebb40916dc3df491-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04c086fc533fb33cebb40916dc3df491 columnFamilyName info
2024-12-09T15:36:11,687 DEBUG [StoreOpener-04c086fc533fb33cebb40916dc3df491-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:36:11,709 DEBUG [StoreOpener-04c086fc533fb33cebb40916dc3df491-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d->hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/8cd328a9ef254adaaa3d4865125c98bc-bottom
2024-12-09T15:36:11,709 INFO [StoreOpener-04c086fc533fb33cebb40916dc3df491-1 {}] regionserver.HStore(327): Store=04c086fc533fb33cebb40916dc3df491/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T15:36:11,709 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,711 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-09T15:36:11,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 in 244 msec
2024-12-09T15:36:11,711 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38049 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=3651d8ead0f3,45989,1733758543994, table=TestLogRolling-testLogRolling, region=68461f97512168f0a84fb8a3ee8a16d8. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now.
2024-12-09T15:36:11,716 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,718 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,718 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68461f97512168f0a84fb8a3ee8a16d8, ASSIGN in 431 msec
2024-12-09T15:36:11,721 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,722 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 04c086fc533fb33cebb40916dc3df491; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810933, jitterRate=0.031154781579971313}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-09T15:36:11,722 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:36:11,722 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 04c086fc533fb33cebb40916dc3df491: Running coprocessor pre-open hook at 1733758571673Writing region info on filesystem at 1733758571673Initializing all the Stores at 1733758571676 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758571676Cleaning up temporary data from old regions at 1733758571718 (+42 ms)Running coprocessor post-open hooks at 1733758571722 (+4 ms)Region opened successfully at 1733758571722
2024-12-09T15:36:11,727 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491., pid=13, masterSystemTime=1733758571604
2024-12-09T15:36:11,727 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 04c086fc533fb33cebb40916dc3df491:info, priority=-2147483648, current under compaction store size is 2
2024-12-09T15:36:11,727 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-09T15:36:11,728 DEBUG [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking
2024-12-09T15:36:11,731 DEBUG [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
2024-12-09T15:36:11,731 INFO [RS_OPEN_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
2024-12-09T15:36:11,731 INFO [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
2024-12-09T15:36:11,731 DEBUG [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.HStore(1541): 04c086fc533fb33cebb40916dc3df491/info is initiating minor compaction (all files)
2024-12-09T15:36:11,732 INFO [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 04c086fc533fb33cebb40916dc3df491/info in TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
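Both daughter compactions kicked off here run under the PressureAwareThroughputController reported a few records below ("average throughput is 5.64 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second"): the compactor meters its writes and sleeps whenever it gets ahead of the configured rate, which these small compactions never needed to. A sleep-based sketch of that throttling idea, with illustrative names; the real controller also adjusts its limit with pressure:

    public class ThroughputThrottleSketch {
        private final double limitBytesPerSec;
        private long bytesWritten;
        private final long startNanos = System.nanoTime();

        ThroughputThrottleSketch(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        // Call after each chunk of compaction output; sleeps off any excess rate.
        void onWrite(long bytes) throws InterruptedException {
            bytesWritten += bytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double aheadSec = bytesWritten / limitBytesPerSec - elapsedSec;
            if (aheadSec > 0) {
                Thread.sleep((long) (aheadSec * 1000)); // pay back the overshoot
            }
        }
    }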
2024-12-09T15:36:11,732 INFO [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d->hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/8cd328a9ef254adaaa3d4865125c98bc-bottom] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/.tmp, totalSize=73.6 K
2024-12-09T15:36:11,735 DEBUG [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] compactions.Compactor(225): Compacting 8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733758555471
2024-12-09T15:36:11,736 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=04c086fc533fb33cebb40916dc3df491, regionState=OPEN, openSeqNum=122, regionLocation=3651d8ead0f3,45989,1733758543994
2024-12-09T15:36:11,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/info/7af679fb05c24ac3a220fd83acd76f6a is 193, key is TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8./info:regioninfo/1733758571672/Put/seqid=0
2024-12-09T15:36:11,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04c086fc533fb33cebb40916dc3df491, server=3651d8ead0f3,45989,1733758543994 because future has completed
2024-12-09T15:36:11,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10
2024-12-09T15:36:11,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 04c086fc533fb33cebb40916dc3df491, server=3651d8ead0f3,45989,1733758543994 in 299 msec
2024-12-09T15:36:11,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7
2024-12-09T15:36:11,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=04c086fc533fb33cebb40916dc3df491, ASSIGN in 488 msec
2024-12-09T15:36:11,776 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68461f97512168f0a84fb8a3ee8a16d8#info#compaction#67 average throughput is 5.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T15:36:11,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e4cb41ff21765f272c4aaa91a5fdf38d, daughterA=04c086fc533fb33cebb40916dc3df491, daughterB=68461f97512168f0a84fb8a3ee8a16d8 in 1.8230 sec
2024-12-09T15:36:11,776 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/cc977974ce6942c6aa359cd481d045a3 is 1080, key is row0062/info:/1733758567783/Put/seqid=0
2024-12-09T15:36:11,789 INFO [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 04c086fc533fb33cebb40916dc3df491#info#compaction#68 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T15:36:11,790 DEBUG [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/.tmp/info/8720814708df4e20806ef2a5ecf6e679 is 1080, key is row0001/info:/1733758555471/Put/seqid=0
2024-12-09T15:36:11,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741850_1026 (size=9882)
2024-12-09T15:36:11,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741850_1026 (size=9882)
2024-12-09T15:36:11,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:11,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/info/7af679fb05c24ac3a220fd83acd76f6a
2024-12-09T15:36:11,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741851_1027 (size=40830)
2024-12-09T15:36:11,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741851_1027 (size=40830)
2024-12-09T15:36:11,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-09T15:36:11,860 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/cc977974ce6942c6aa359cd481d045a3 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/cc977974ce6942c6aa359cd481d045a3 2024-12-09T15:36:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741852_1028 (size=70862) 2024-12-09T15:36:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741852_1028 (size=70862) 2024-12-09T15:36:11,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/ns/3b63626ff6c1434bbdcbf0e36798a3e1 is 43, key is default/ns:d/1733758545256/Put/seqid=0 2024-12-09T15:36:11,876 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 68461f97512168f0a84fb8a3ee8a16d8/info of 68461f97512168f0a84fb8a3ee8a16d8 into cc977974ce6942c6aa359cd481d045a3(size=39.9 K), total size for store is 39.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T15:36:11,876 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:36:11,876 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., storeName=68461f97512168f0a84fb8a3ee8a16d8/info, priority=13, startTime=1733758571668; duration=0sec 2024-12-09T15:36:11,876 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:36:11,876 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68461f97512168f0a84fb8a3ee8a16d8:info 2024-12-09T15:36:11,900 DEBUG [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/.tmp/info/8720814708df4e20806ef2a5ecf6e679 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/info/8720814708df4e20806ef2a5ecf6e679 2024-12-09T15:36:11,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741853_1029 (size=5153) 2024-12-09T15:36:11,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741853_1029 (size=5153) 2024-12-09T15:36:11,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/ns/3b63626ff6c1434bbdcbf0e36798a3e1 2024-12-09T15:36:11,930 INFO [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 04c086fc533fb33cebb40916dc3df491/info of 04c086fc533fb33cebb40916dc3df491 into 8720814708df4e20806ef2a5ecf6e679(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T15:36:11,930 DEBUG [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 04c086fc533fb33cebb40916dc3df491: 2024-12-09T15:36:11,930 INFO [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491., storeName=04c086fc533fb33cebb40916dc3df491/info, priority=15, startTime=1733758571727; duration=0sec 2024-12-09T15:36:11,930 DEBUG [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:36:11,930 DEBUG [RS:0;3651d8ead0f3:45989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 04c086fc533fb33cebb40916dc3df491:info 2024-12-09T15:36:11,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/table/f7c8e9272b014568a0cda3a395ab86c8 is 65, key is TestLogRolling-testLogRolling/table:state/1733758545860/Put/seqid=0 2024-12-09T15:36:11,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741854_1030 (size=5340) 2024-12-09T15:36:11,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741854_1030 (size=5340) 2024-12-09T15:36:11,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/table/f7c8e9272b014568a0cda3a395ab86c8 2024-12-09T15:36:11,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/info/7af679fb05c24ac3a220fd83acd76f6a as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/info/7af679fb05c24ac3a220fd83acd76f6a 2024-12-09T15:36:12,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/info/7af679fb05c24ac3a220fd83acd76f6a, entries=30, sequenceid=17, filesize=9.7 K 2024-12-09T15:36:12,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/ns/3b63626ff6c1434bbdcbf0e36798a3e1 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/ns/3b63626ff6c1434bbdcbf0e36798a3e1 
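The "Failed invocation" WARNs above, which recur through the rest of this shutdown, come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection while WAL writers are being closed; the underlying DFSClient is already shut down, so the probed call throws IOException("Filesystem closed"), and reflection surfaces it as an InvocationTargetException whose own message is null (hence the ": null" in the log). A minimal, self-contained Java sketch of that wrapping behavior (ClosedFs below is a hypothetical stand-in, not an HBase or HDFS type):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveProbe {
    // Stand-in that mimics DistributedFileSystem.isFileClosed after the client was closed.
    public static class ClosedFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new ClosedFs(), "/some/wal");
        } catch (InvocationTargetException e) {
            // e.getCause() is the original IOException -- the "Caused by:" in the log.
            System.out.println(e + " / caused by: " + e.getCause());
        }
    }
}

Under this reading, the repeats below are a retry loop: the lease-recovery probe keeps failing with the same cause for each of the three WAL files until the close path gives up.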
2024-12-09T15:36:12,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/ns/3b63626ff6c1434bbdcbf0e36798a3e1, entries=2, sequenceid=17, filesize=5.0 K
2024-12-09T15:36:12,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/table/f7c8e9272b014568a0cda3a395ab86c8 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/table/f7c8e9272b014568a0cda3a395ab86c8
2024-12-09T15:36:12,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/table/f7c8e9272b014568a0cda3a395ab86c8, entries=2, sequenceid=17, filesize=5.2 K
2024-12-09T15:36:12,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 369ms, sequenceid=17, compaction requested=false
2024-12-09T15:36:12,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-12-09T15:36:12,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:12,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:12,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:12,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:13,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:13,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:13,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:13,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:13,919 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-09T15:36:14,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:14,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
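The FsDatasetAsyncDiskServiceFixer DEBUG a few entries above reports a reflective lookup of a "threadGroup" field that, per the message itself, newer Hadoop releases (after 3.2.3/3.3.4) no longer declare; the test utility catches the NoSuchFieldException and merely logs it (HBASE-27595). A minimal sketch of how such a lookup fails (AsyncDiskService below is a hypothetical stand-in for the reflected-on Hadoop class):

public class FieldProbe {
    // Stand-in for a newer FsDatasetAsyncDiskService that no longer has the field.
    static class AsyncDiskService { }

    public static void main(String[] args) {
        try {
            AsyncDiskService.class.getDeclaredField("threadGroup");
        } catch (NoSuchFieldException e) {
            // The exception message is just the missing field name, as in the DEBUG above.
            System.out.println("NoSuchFieldException: " + e.getMessage());
        }
    }
}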
2024-12-09T15:36:14,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:14,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:15,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:15,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-09T15:36:15,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,369 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
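The repeated FsDatasetImpl metric-collection WARNs above (and continuing below) carry a JDK helpful-NullPointerException message, which is emitted by default on JDK 15 and later: some internal executors map has already been nulled out by shutdown when the metrics thread samples it. A minimal sketch reproducing that exact message wording (the class and method are hypothetical; only the field name "executors" is taken from the log):

import java.util.Map;

public class NullMapProbe {
    private Map<String, Object> executors; // never initialized, as after shutdown

    int taskCount() {
        return this.executors.values().size(); // throws the helpful NPE
    }

    public static void main(String[] args) {
        try {
            new NullMapProbe().taskCount();
        } catch (NullPointerException e) {
            // Prints: Cannot invoke "java.util.Map.values()" because "this.executors" is null
            System.out.println(e.getMessage());
        }
    }
}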
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:15,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:15,882 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T15:36:15,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:15,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
2024-12-09T15:36:15,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:15,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T15:36:16,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:16,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:16,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:16,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:17,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:17,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:17,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:17,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:18,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:18,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:18,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:18,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:19,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:19,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:19,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:19,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:19,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:46776 deadline: 1733758589963, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. is not online on 3651d8ead0f3,45989,1733758543994
2024-12-09T15:36:19,964 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. is not online on 3651d8ead0f3,45989,1733758543994
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T15:36:19,964 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d. is not online on 3651d8ead0f3,45989,1733758543994
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T15:36:19,964 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733758545370.e4cb41ff21765f272c4aaa91a5fdf38d., hostname=3651d8ead0f3,45989,1733758543994, seqNum=2 from cache
2024-12-09T15:36:20,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:20,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:20,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:20,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:21,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:21,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:21,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:21,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:22,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:22,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:22,361 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=4, created chunk count=9, reused chunk count=58, reuseRatio=86.57%
2024-12-09T15:36:22,362 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-09T15:36:22,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:22,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
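The data-pool numbers at 15:36:22,361 are self-consistent if the reuse ratio is computed as reused chunks over all chunks served; the formula below is inferred from the reported values, not taken from the ChunkCreator source:

$$\text{reuseRatio} \;=\; \frac{\text{reused}}{\text{created} + \text{reused}} \;=\; \frac{58}{9 + 58} \;=\; \frac{58}{67} \;\approx\; 86.57\%$$

which matches the logged 86.57%; the index pool reports 0 because no index chunks were ever created or reused.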
2024-12-09T15:36:23,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:23,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:23,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:23,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:24,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:24,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:24,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:24,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:25,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:25,291 INFO [master/3651d8ead0f3:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-09T15:36:25,291 INFO [master/3651d8ead0f3:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-09T15:36:25,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:25,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:25,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:26,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:26,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:26,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:26,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:27,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:27,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:27,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:27,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:28,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:28,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:28,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:28,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:29,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:29,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:29,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:29,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:29,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:30,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:30,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:30,334 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T15:36:30,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T15:36:30,408 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-12-09T15:36:30,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:30,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:36:31,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:31,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:31,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:36:31,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:32,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:32,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:36:32,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:32,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:33,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:36:33,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:33,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:33,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-09T15:36:34,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:34,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:34,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:34,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:35,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:35,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:35,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:35,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:35,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 after 196229ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T15:36:36,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:36,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:36,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:36,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:37,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:37,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:37,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:37,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:38,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:38,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:38,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:38,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:39,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:36:39,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:36:39,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:36:39,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:36:40,039 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0095', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122]
2024-12-09T15:36:40,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:40,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:40,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:40,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:41,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:41,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:41,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:41,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8 2024-12-09T15:36:42,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T15:36:42,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/2292f657e3a848449aa7cc907f116f3e is 1080, key is row0095/info:/1733758600041/Put/seqid=0 2024-12-09T15:36:42,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:36:42,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741855_1031 (size=12513) 2024-12-09T15:36:42,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741855_1031 (size=12513) 2024-12-09T15:36:42,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/2292f657e3a848449aa7cc907f116f3e 2024-12-09T15:36:42,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/2292f657e3a848449aa7cc907f116f3e as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2292f657e3a848449aa7cc907f116f3e 2024-12-09T15:36:42,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2292f657e3a848449aa7cc907f116f3e, entries=7, sequenceid=132, filesize=12.2 K 2024-12-09T15:36:42,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 68461f97512168f0a84fb8a3ee8a16d8 in 51ms, sequenceid=132, compaction requested=false 2024-12-09T15:36:42,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:36:42,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8 2024-12-09T15:36:42,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-09T15:36:42,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/863a6bd60c9845939e6401a5f1fd822a is 1080, key is row0102/info:/1733758602055/Put/seqid=0 2024-12-09T15:36:42,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741856_1032 (size=24394) 2024-12-09T15:36:42,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741856_1032 (size=24394) 2024-12-09T15:36:42,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/863a6bd60c9845939e6401a5f1fd822a 2024-12-09T15:36:42,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/863a6bd60c9845939e6401a5f1fd822a as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/863a6bd60c9845939e6401a5f1fd822a 2024-12-09T15:36:42,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/863a6bd60c9845939e6401a5f1fd822a, entries=18, sequenceid=153, filesize=23.8 K 2024-12-09T15:36:42,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=9.46 KB/9684 for 68461f97512168f0a84fb8a3ee8a16d8 in 55ms, sequenceid=153, compaction requested=true 2024-12-09T15:36:42,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:36:42,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68461f97512168f0a84fb8a3ee8a16d8:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T15:36:42,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:36:42,164 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T15:36:42,167 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77737 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T15:36:42,167 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1541): 68461f97512168f0a84fb8a3ee8a16d8/info is initiating minor compaction (all files) 2024-12-09T15:36:42,167 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 68461f97512168f0a84fb8a3ee8a16d8/info in TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8. 
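The minor compaction recorded above selected all three eligible HFiles (39.9 K + 12.2 K + 23.8 K, the 77737 bytes reported by the exploring policy) because the set passed the policy's size-ratio test. A simplified sketch of that test follows; it is illustrative only, not the actual ExploringCompactionPolicy code (which weighs many candidate permutations and further constraints), and the 1.2 ratio is an assumed default for hbase.hstore.compaction.ratio.

```java
// Sketch of the "in ratio" check behind the log line
// "Exploring compaction algorithm has selected 3 files of size 77737
//  ... with 1 in ratio". Illustrative only, not the HBase source.
public class RatioCheckSketch {

    // A candidate set is "in ratio" when no single file is larger than
    // ratio * (combined size of the other files in the set).
    static boolean filesInRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) {
            total += s;
        }
        for (long s : sizes) {
            if (s > (total - s) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximately the three HFiles from the log: 39.9 K, 12.2 K and
        // 23.8 K, summing to the reported 77737 bytes (~75.9 K).
        long[] candidate = {40830, 12513, 24394};
        System.out.println(filesInRatio(candidate, 1.2)); // prints: true
    }
}
```

With these sizes no file exceeds 1.2x the rest of the set, so the whole set qualifies, matching the "3 eligible" selection and the single resulting 66.4 K output file logged below.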
2024-12-09T15:36:42,169 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/cc977974ce6942c6aa359cd481d045a3, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2292f657e3a848449aa7cc907f116f3e, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/863a6bd60c9845939e6401a5f1fd822a] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp, totalSize=75.9 K 2024-12-09T15:36:42,169 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc977974ce6942c6aa359cd481d045a3, keycount=33, bloomtype=ROW, size=39.9 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733758567783 2024-12-09T15:36:42,173 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2292f657e3a848449aa7cc907f116f3e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733758600041 2024-12-09T15:36:42,174 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 863a6bd60c9845939e6401a5f1fd822a, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733758602055 2024-12-09T15:36:42,199 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68461f97512168f0a84fb8a3ee8a16d8#info#compaction#73 average throughput is 29.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T15:36:42,200 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/911c81e595964a94a40125c851379367 is 1080, key is row0062/info:/1733758567783/Put/seqid=0 2024-12-09T15:36:42,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741857_1033 (size=67947) 2024-12-09T15:36:42,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741857_1033 (size=67947) 2024-12-09T15:36:42,227 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/911c81e595964a94a40125c851379367 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/911c81e595964a94a40125c851379367 2024-12-09T15:36:42,234 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 68461f97512168f0a84fb8a3ee8a16d8/info of 68461f97512168f0a84fb8a3ee8a16d8 into 911c81e595964a94a40125c851379367(size=66.4 K), total size for store is 66.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T15:36:42,234 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:36:42,234 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., storeName=68461f97512168f0a84fb8a3ee8a16d8/info, priority=13, startTime=1733758602164; duration=0sec 2024-12-09T15:36:42,234 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:36:42,234 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68461f97512168f0a84fb8a3ee8a16d8:info 2024-12-09T15:36:42,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:42,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:42,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:43,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:43,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:43,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:43,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:43,920 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T15:36:44,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:44,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8 2024-12-09T15:36:44,142 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-09T15:36:44,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/6a31273e45df4a29a578321502bcc4b9 is 1080, key is row0120/info:/1733758602109/Put/seqid=0 2024-12-09T15:36:44,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741858_1034 (size=15750) 2024-12-09T15:36:44,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741858_1034 (size=15750) 2024-12-09T15:36:44,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:44,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T15:36:44,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:46776 deadline: 1733758614337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 2024-12-09T15:36:44,339 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 , the old value is region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T15:36:44,339 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T15:36:44,339 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 because the exception is null or not the one we care about 2024-12-09T15:36:44,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/6a31273e45df4a29a578321502bcc4b9 2024-12-09T15:36:44,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/6a31273e45df4a29a578321502bcc4b9 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/6a31273e45df4a29a578321502bcc4b9 2024-12-09T15:36:44,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/6a31273e45df4a29a578321502bcc4b9, entries=10, sequenceid=167, filesize=15.4 K 2024-12-09T15:36:44,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished 
2024-12-09T15:36:44,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:44,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
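In the Failed invocation warnings above, the logged InvocationTargetException: null is only the reflective wrapper (that exception carries no message of its own, hence the null); the real failure is the Caused by: java.io.IOException: Filesystem closed, meaning the shared DFSClient had already been shut down while the Close-WAL-Writer-0 thread was still polling whether the WAL file had closed. The stack shows RecoverLeaseFSUtils reaching DistributedFileSystem.isFileClosed through reflection; the following sketch illustrates that pattern with a hypothetical path and filesystem setup (the exact cause only reproduces against an HDFS filesystem, not the local default, which lacks isFileClosed entirely):

// Sketch only: calling DistributedFileSystem.isFileClosed(Path) reflectively,
// the way RecoverLeaseFSUtils does, and unwrapping the failure. The path is
// hypothetical; closing the FileSystem first mimics the mini-cluster shutdown
// that produced the "Filesystem closed" cause in the log above.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path wal = new Path("/user/jenkins/some-wal-file"); // hypothetical
    fs.close(); // against HDFS, later calls now fail with "Filesystem closed"
    try {
      // isFileClosed only exists on DistributedFileSystem, hence reflection.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      boolean closed = (Boolean) isFileClosed.invoke(fs, wal);
      System.out.println("closed=" + closed);
    } catch (NoSuchMethodException e) {
      // Not an HDFS filesystem (e.g. the local default), or an old client.
      System.out.println("isFileClosed not available: " + e);
    } catch (InvocationTargetException e) {
      // The wrapper itself has no message; getCause() is the real error,
      // here java.io.IOException: Filesystem closed.
      System.out.println("cause: " + e.getCause());
    }
  }
}

Because the failed probe is treated as non-fatal, the utility logs the warning and tries again, which is why the same entry recurs below at roughly one-second intervals per WAL file.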
2024-12-09T15:36:45,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:45,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:45,450 DEBUG [master/3651d8ead0f3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache
[The same RecoverLeaseFSUtils(258) WARN, with an identical InvocationTargetException stack caused by java.io.IOException: Filesystem closed, repeats for the same four WAL files at roughly one-second intervals per file from 2024-12-09T15:36:45,824 through 2024-12-09T15:36:51,323; the duplicate stack traces are elided.]
2024-12-09T15:36:51,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:51,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:52,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:52,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:52,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:52,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:53,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:53,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:53,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:53,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 after 196231ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:36:53,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:54,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:54,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:36:54,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8 2024-12-09T15:36:54,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-12-09T15:36:54,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/f3cfbe35e6f446ec91eac04d17100921 is 1080, key is row0130/info:/1733758604144/Put/seqid=0 2024-12-09T15:36:54,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741859_1035 (size=26550) 2024-12-09T15:36:54,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741859_1035 (size=26550) 2024-12-09T15:36:54,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T15:36:54,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:46776 deadline: 1733758624480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 2024-12-09T15:36:54,483 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 , the old value is region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T15:36:54,483 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T15:36:54,483 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 because the exception is null or not the one we care about 2024-12-09T15:36:54,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:36:54,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/f3cfbe35e6f446ec91eac04d17100921 2024-12-09T15:36:54,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:36:54,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/f3cfbe35e6f446ec91eac04d17100921 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f3cfbe35e6f446ec91eac04d17100921 2024-12-09T15:36:54,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f3cfbe35e6f446ec91eac04d17100921, entries=20, sequenceid=190, filesize=25.9 K 2024-12-09T15:36:54,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for 68461f97512168f0a84fb8a3ee8a16d8 in 481ms, sequenceid=190, compaction requested=true 2024-12-09T15:36:54,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:36:54,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68461f97512168f0a84fb8a3ee8a16d8:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T15:36:54,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:36:54,915 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T15:36:54,927 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110247 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T15:36:54,927 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1541): 68461f97512168f0a84fb8a3ee8a16d8/info is initiating minor compaction (all files) 2024-12-09T15:36:54,927 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 68461f97512168f0a84fb8a3ee8a16d8/info in TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8. 
2024-12-09T15:36:54,927 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/911c81e595964a94a40125c851379367, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/6a31273e45df4a29a578321502bcc4b9, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f3cfbe35e6f446ec91eac04d17100921] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp, totalSize=107.7 K 2024-12-09T15:36:54,928 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 911c81e595964a94a40125c851379367, keycount=58, bloomtype=ROW, size=66.4 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733758567783 2024-12-09T15:36:54,935 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a31273e45df4a29a578321502bcc4b9, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733758602109 2024-12-09T15:36:54,940 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting f3cfbe35e6f446ec91eac04d17100921, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733758604144 2024-12-09T15:36:54,973 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68461f97512168f0a84fb8a3ee8a16d8#info#compaction#76 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T15:36:54,977 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/96bc4f44f22d442d8ae983014d0d5ef6 is 1080, key is row0062/info:/1733758567783/Put/seqid=0 2024-12-09T15:36:55,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741860_1036 (size=100466) 2024-12-09T15:36:55,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741860_1036 (size=100466) 2024-12-09T15:36:55,028 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/96bc4f44f22d442d8ae983014d0d5ef6 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/96bc4f44f22d442d8ae983014d0d5ef6 2024-12-09T15:36:55,039 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 68461f97512168f0a84fb8a3ee8a16d8/info of 68461f97512168f0a84fb8a3ee8a16d8 into 96bc4f44f22d442d8ae983014d0d5ef6(size=98.1 K), total size for store is 98.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T15:36:55,040 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:36:55,040 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., storeName=68461f97512168f0a84fb8a3ee8a16d8/info, priority=13, startTime=1733758614915; duration=0sec 2024-12-09T15:36:55,040 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:36:55,040 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68461f97512168f0a84fb8a3ee8a16d8:info 2024-12-09T15:36:55,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-12-09T15:36:55,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:36:55,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the one above; elided]
2024-12-09T15:36:55,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null [stack trace identical to the one above; elided]
2024-12-09T15:36:55,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null [stack trace identical to the one above; elided]
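[editor's note] The WARNs above wrap the real failure in a reflective call: RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed via java.lang.reflect, so the underlying IOException ("Filesystem closed", thrown by DFSClient.checkOpen once the client is shut down) surfaces as the cause of an InvocationTargetException whose own message is null. A minimal, self-contained sketch of that wrapping; ClosedFs is a made-up stand-in, not an HDFS class.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveInvokeSketch {
    /** Hypothetical stand-in for a filesystem client that has been closed. */
    public static class ClosedFs {
        // Mirrors the effect of DFSClient.checkOpen(): every call fails after close().
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        ClosedFs fs = new ClosedFs();
        Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(fs, "/some/wal");
        } catch (InvocationTargetException e) {
            // The reflective wrapper carries no message of its own, which is why
            // the log prints "InvocationTargetException: null"; the real failure
            // is attached as the cause, exactly as in the WARNs above.
            System.out.println("wrapper: " + e.getMessage()); // -> null
            System.out.println("cause:   " + e.getCause());   // -> java.io.IOException: Filesystem closed
        }
    }
}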
[log trimmed: the same "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" WARN and identical stack trace recur for each of the four WAL files above, roughly once per second per file, from 2024-12-09T15:36:56,089 through 2024-12-09T15:37:02,093; only the timestamp and file path vary.]
2024-12-09T15:36:56,609 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 68461f97512168f0a84fb8a3ee8a16d8, had cached 0 bytes from a total of 100466
2024-12-09T15:36:56,672 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04c086fc533fb33cebb40916dc3df491, had cached 0 bytes from a total of 70862
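[editor's note] The trimmed repeats show a paced poll: each WAL file is re-checked on an interval until lease recovery succeeds or a deadline passes, with attempts tallied along the way (the attempt=3 WARN that follows reports roughly 196 s elapsed). Below is a hedged sketch of such a bounded retry loop; the interface, names, and intervals are illustrative assumptions, not RecoverLeaseFSUtils itself.

import java.io.IOException;

public class LeaseRetrySketch {
    @FunctionalInterface
    interface Check { boolean run() throws IOException; }

    // Polls `check` until it succeeds or `timeoutMs` elapses, logging each
    // failed attempt in the "attempt=N ... after Nms" style seen in the log.
    static boolean retryUntil(Check check, long intervalMs, long timeoutMs)
            throws InterruptedException {
        long start = System.currentTimeMillis();
        for (int attempt = 1; ; attempt++) {
            try {
                if (check.run()) return true;
            } catch (IOException e) {
                long elapsed = System.currentTimeMillis() - start;
                System.err.printf("attempt=%d failed after %dms: %s%n", attempt, elapsed, e);
            }
            if (System.currentTimeMillis() - start >= timeoutMs) return false;
            Thread.sleep(intervalMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // The check always fails here, so we see three paced attempts and then
        // give up, loosely mirroring the attempt=3 WARN below.
        boolean ok = retryUntil(
                () -> { throw new IOException("Filesystem closed"); },
                250, 500);
        System.out.println("recovered=" + ok);
    }
}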
2024-12-09T15:37:02,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 after 196173ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:37:02,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:02,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta after 196202ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T15:37:02,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
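The run of WARNs above is HBase's WAL lease recovery spinning against a DFS client that has already been shut down: RecoverLeaseFSUtils invokes DistributedFileSystem.recoverLease and isFileClosed reflectively (hence the InvocationTargetException wrappers), and every probe fails in DFSClient.checkOpen with "Filesystem closed". Below is a minimal, non-reflective sketch of the same retry pattern; it is not the HBase implementation, and the class name, parameters, and WAL path are illustrative only.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {

  // Ask the NameNode to release the previous writer's lease on the WAL,
  // then poll isFileClosed() until recovery completes or attempts run out.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal,
      int maxAttempts, long pauseMs) throws InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          return true; // lease released and file closed
        }
      } catch (IOException e) {
        // A client that was already shut down throws "Filesystem closed"
        // on every attempt -- exactly the WARNs in the log above.
        System.err.println("attempt=" + attempt + " on file=" + wal + ": " + e.getMessage());
      }
      Thread.sleep(pauseMs); // back off before the next probe
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The URI matches the NameNode port in the log; any HDFS URI works here.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38871"), conf);
    Path wal = new Path("/user/jenkins/wal-under-recovery"); // hypothetical file
    System.out.println(recoverLease((DistributedFileSystem) fs, wal, 4, 1000L));
  }
}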
2024-12-09T15:37:02,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:03,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:03,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:03,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:03,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:04,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:04,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:04,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:37:04,525 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-12-09T15:37:04,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/db6383251f0e469e9fa88bd0561c6eb1 is 1080, key is row0150/info:/1733758614436/Put/seqid=0
2024-12-09T15:37:04,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741861_1037 (size=15750)
2024-12-09T15:37:04,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741861_1037 (size=15750)
2024-12-09T15:37:04,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/db6383251f0e469e9fa88bd0561c6eb1
2024-12-09T15:37:04,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/db6383251f0e469e9fa88bd0561c6eb1 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/db6383251f0e469e9fa88bd0561c6eb1
2024-12-09T15:37:04,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/db6383251f0e469e9fa88bd0561c6eb1, entries=10, sequenceid=204, filesize=15.4 K
2024-12-09T15:37:04,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=1.05 KB/1076 for 68461f97512168f0a84fb8a3ee8a16d8 in 22ms, sequenceid=204, compaction requested=false
2024-12-09T15:37:04,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8:
2024-12-09T15:37:04,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 [identical "Filesystem closed" stack trace elided]
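The flush at 15:37:04 above follows the usual two-step commit: the new HFile is first written under the region's .tmp directory ("Flushed memstore ... to=.../.tmp/info/db63..."), then moved into the live info/ directory in a single rename ("Committing ... as ..."), so readers never observe a partially written file. A sketch of that write-then-rename pattern follows; it runs against the local filesystem for the demo, and the paths and names are hypothetical, not HBase's actual layout code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {

  // Move a fully written file from the store's .tmp directory into the live
  // column-family directory. The single rename is what makes the new file
  // visible to readers.
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir)
      throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration()); // local FS for the demo
    Path tmp = new Path("/tmp/region/.tmp/info/demo-hfile");  // hypothetical layout
    Path familyDir = new Path("/tmp/region/info");
    fs.mkdirs(tmp.getParent());
    fs.mkdirs(familyDir);
    fs.createNewFile(tmp); // stand-in for the flushed HFile
    System.out.println("committed to " + commitStoreFile(fs, tmp, familyDir));
  }
}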
2024-12-09T15:37:04,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:05,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:05,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:05,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:05,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:06,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 [identical "Filesystem closed" stack trace elided]
2024-12-09T15:37:06,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta [identical "Filesystem closed" stack trace elided]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:06,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8 2024-12-09T15:37:06,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T15:37:06,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/30c3ceed0e004ab6bf58c440455684f6 is 1080, key is row0160/info:/1733758624526/Put/seqid=0 2024-12-09T15:37:06,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741862_1038 (size=12516) 2024-12-09T15:37:06,612 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/30c3ceed0e004ab6bf58c440455684f6 2024-12-09T15:37:06,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741862_1038 (size=12516) 2024-12-09T15:37:06,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/30c3ceed0e004ab6bf58c440455684f6 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/30c3ceed0e004ab6bf58c440455684f6 2024-12-09T15:37:06,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/30c3ceed0e004ab6bf58c440455684f6, entries=7, sequenceid=214, filesize=12.2 K 2024-12-09T15:37:06,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 68461f97512168f0a84fb8a3ee8a16d8 in 101ms, sequenceid=214, compaction requested=true 2024-12-09T15:37:06,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:37:06,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68461f97512168f0a84fb8a3ee8a16d8:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T15:37:06,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:37:06,643 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T15:37:06,645 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128732 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T15:37:06,645 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1541): 68461f97512168f0a84fb8a3ee8a16d8/info is initiating minor compaction (all files) 2024-12-09T15:37:06,645 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 68461f97512168f0a84fb8a3ee8a16d8/info in TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8. 2024-12-09T15:37:06,645 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/96bc4f44f22d442d8ae983014d0d5ef6, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/db6383251f0e469e9fa88bd0561c6eb1, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/30c3ceed0e004ab6bf58c440455684f6] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp, totalSize=125.7 K 2024-12-09T15:37:06,646 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96bc4f44f22d442d8ae983014d0d5ef6, keycount=88, bloomtype=ROW, size=98.1 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733758567783 2024-12-09T15:37:06,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-12-09T15:37:06,646 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting db6383251f0e469e9fa88bd0561c6eb1, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733758614436 2024-12-09T15:37:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8 2024-12-09T15:37:06,647 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 30c3ceed0e004ab6bf58c440455684f6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733758624526 2024-12-09T15:37:06,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/2af96809d7c148fea9b84a03814116ea is 1080, key is row0167/info:/1733758626544/Put/seqid=0 2024-12-09T15:37:06,682 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
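The ExploringCompactionPolicy line above reports its chosen candidate as "1 in ratio". The core of that test is that no file in the candidate set may exceed the configured ratio times the combined size of the others; the real policy additionally enumerates contiguous candidate windows and, as here for a small store, can still take all files for a minor compaction. A simplified stand-alone sketch of just the ratio test (class name and numbers are illustrative):

import java.util.List;

public class FilesInRatioSketch {

  // A candidate set is "in ratio" when no file is more than `ratio` times
  // the combined size of the other files in the set.
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    if (sizes.size() < 2) {
      return true;
    }
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(filesInRatio(List.of(10L, 12L, 15L), 1.2));  // true: similar sizes
    System.out.println(filesInRatio(List.of(100L, 10L, 10L), 1.2)); // false: one file dwarfs the rest
  }
}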
68461f97512168f0a84fb8a3ee8a16d8#info#compaction#80 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T15:37:06,683 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/f75aed28d1024ee7a4804053f8ea2e2e is 1080, key is row0062/info:/1733758567783/Put/seqid=0 2024-12-09T15:37:06,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741863_1039 (size=27628) 2024-12-09T15:37:06,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/2af96809d7c148fea9b84a03814116ea 2024-12-09T15:37:06,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741863_1039 (size=27628) 2024-12-09T15:37:06,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/2af96809d7c148fea9b84a03814116ea as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2af96809d7c148fea9b84a03814116ea 2024-12-09T15:37:06,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2af96809d7c148fea9b84a03814116ea, entries=21, sequenceid=238, filesize=27.0 K 2024-12-09T15:37:06,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741864_1040 (size=118898) 2024-12-09T15:37:06,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741864_1040 (size=118898) 2024-12-09T15:37:06,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=5.25 KB/5380 for 68461f97512168f0a84fb8a3ee8a16d8 in 88ms, sequenceid=238, compaction requested=false 2024-12-09T15:37:06,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:37:06,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:06,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:07,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:07,135 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/f75aed28d1024ee7a4804053f8ea2e2e as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f75aed28d1024ee7a4804053f8ea2e2e 2024-12-09T15:37:07,144 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 68461f97512168f0a84fb8a3ee8a16d8/info of 68461f97512168f0a84fb8a3ee8a16d8 into f75aed28d1024ee7a4804053f8ea2e2e(size=116.1 K), total size for store is 143.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T15:37:07,145 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:37:07,145 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., storeName=68461f97512168f0a84fb8a3ee8a16d8/info, priority=13, startTime=1733758626643; duration=0sec 2024-12-09T15:37:07,145 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:37:07,145 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68461f97512168f0a84fb8a3ee8a16d8:info 2024-12-09T15:37:07,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:07,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:07,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:08,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:08,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:08,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8 2024-12-09T15:37:08,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T15:37:08,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/55c41755dee74474ba5ce5fcd9f3b6d4 is 1080, key is row0188/info:/1733758626649/Put/seqid=0 2024-12-09T15:37:08,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741865_1041 (size=12516) 2024-12-09T15:37:08,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741865_1041 (size=12516) 2024-12-09T15:37:08,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/55c41755dee74474ba5ce5fcd9f3b6d4 2024-12-09T15:37:08,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/55c41755dee74474ba5ce5fcd9f3b6d4 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/55c41755dee74474ba5ce5fcd9f3b6d4 2024-12-09T15:37:08,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/55c41755dee74474ba5ce5fcd9f3b6d4, entries=7, sequenceid=249, filesize=12.2 K 2024-12-09T15:37:08,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for 68461f97512168f0a84fb8a3ee8a16d8 in 28ms, sequenceid=249, compaction requested=true 2024-12-09T15:37:08,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:37:08,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8 2024-12-09T15:37:08,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68461f97512168f0a84fb8a3ee8a16d8:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T15:37:08,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:37:08,699 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T15:37:08,699 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-09T15:37:08,700 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 159042 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T15:37:08,700 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1541): 68461f97512168f0a84fb8a3ee8a16d8/info is initiating minor compaction (all files) 2024-12-09T15:37:08,700 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 68461f97512168f0a84fb8a3ee8a16d8/info in TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8. 2024-12-09T15:37:08,700 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f75aed28d1024ee7a4804053f8ea2e2e, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2af96809d7c148fea9b84a03814116ea, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/55c41755dee74474ba5ce5fcd9f3b6d4] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp, totalSize=155.3 K 2024-12-09T15:37:08,700 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting f75aed28d1024ee7a4804053f8ea2e2e, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733758567783 2024-12-09T15:37:08,701 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2af96809d7c148fea9b84a03814116ea, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733758626544 2024-12-09T15:37:08,701 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 55c41755dee74474ba5ce5fcd9f3b6d4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733758626649 2024-12-09T15:37:08,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/442aae658fbc4839b081d610eae93b97 is 1080, key is row0195/info:/1733758628671/Put/seqid=0 2024-12-09T15:37:08,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741866_1042 (size=24407) 2024-12-09T15:37:08,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741866_1042 (size=24407) 2024-12-09T15:37:08,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=270 (bloomFilter=true), 
to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/442aae658fbc4839b081d610eae93b97 2024-12-09T15:37:08,713 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68461f97512168f0a84fb8a3ee8a16d8#info#compaction#83 average throughput is 68.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T15:37:08,714 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/76546390f0ca40cbbbf5032df30af196 is 1080, key is row0062/info:/1733758567783/Put/seqid=0 2024-12-09T15:37:08,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/442aae658fbc4839b081d610eae93b97 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/442aae658fbc4839b081d610eae93b97 2024-12-09T15:37:08,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T15:37:08,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:46776 deadline: 1733758638718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 2024-12-09T15:37:08,720 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 , the old value is region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T15:37:08,720 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T15:37:08,720 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 because the exception is null or not the one we care about 2024-12-09T15:37:08,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741867_1043 (size=149389) 2024-12-09T15:37:08,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741867_1043 (size=149389) 2024-12-09T15:37:08,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/442aae658fbc4839b081d610eae93b97, entries=18, sequenceid=270, filesize=23.8 K 2024-12-09T15:37:08,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 68461f97512168f0a84fb8a3ee8a16d8 in 28ms, sequenceid=270, compaction requested=false 2024-12-09T15:37:08,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:37:08,732 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/76546390f0ca40cbbbf5032df30af196 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/76546390f0ca40cbbbf5032df30af196 2024-12-09T15:37:08,739 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 68461f97512168f0a84fb8a3ee8a16d8/info of 68461f97512168f0a84fb8a3ee8a16d8 into 76546390f0ca40cbbbf5032df30af196(size=145.9 K), total size for store is 169.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
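The RegionTooBusyException recorded above is deliberate write backpressure rather than a failure: once the region's pending memstore crosses its blocking limit (a test-scale 32.0 K here), HRegion.checkResources rejects further mutations until the in-flight flushes and compactions drain the backlog, and AsyncRegionLocatorHelper explicitly declines to invalidate its cached region location because the error does not mean the region moved. A caller is expected to back off and retry. The following is a minimal sketch of that pattern against the blocking Table API; the table, family, and row names are taken from the log, the retry count and backoff constants are illustrative, and depending on client retry settings the exception may surface wrapped in a RetriesExhaustedException rather than directly as shown.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MemstoreBackoffPut {

        // Retry a put when the server signals memstore backpressure.
        // RegionTooBusyException is retryable: the region has not moved, the
        // server is only asking the client to slow down while MemStoreFlusher
        // and the compaction threads catch up.
        static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
            long sleepMs = 100L; // illustrative initial backoff
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(sleepMs);                   // let flushes drain the memstore
                    sleepMs = Math.min(sleepMs * 2, 5_000L); // exponential backoff, capped
                }
            }
            throw new IOException("region still too busy after retries");
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
                // Row and column taken from the log; the empty qualifier matches
                // the "info:" keys the test writes.
                Put put = new Put(Bytes.toBytes("row0195"))
                    .addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("v"));
                putWithBackoff(table, put);
            }
        }
    }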
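The recurring Close-WAL-Writer-0 warnings that resume below are a retry loop that can no longer succeed: RecoverLeaseFSUtils is polling DistributedFileSystem.isFileClosed, roughly once per second for each of four WAL files of the earlier mini-cluster at localhost:38871 (it invokes the method reflectively, which is why each failure surfaces wrapped in an InvocationTargetException), but the DFSClient behind that FileSystem has already been shut down, so every probe throws java.io.IOException: Filesystem closed before it ever reaches the NameNode. Stripped of the reflection, the probe being attempted looks roughly like the sketch below; the WAL path is taken from the log, recoverLease and isFileClosed are the public DistributedFileSystem API, and the timeout and poll interval are illustrative rather than the values RecoverLeaseFSUtils actually uses.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class WalLeaseProbe {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Path wal = new Path("hdfs://localhost:38871/user/jenkins/test-data/"
                + "5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/"
                + "3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210");
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(wal.toUri(), conf);

            // Ask the NameNode to start lease recovery; true means the file is
            // already closed and there is nothing left to recover.
            boolean closed = dfs.recoverLease(wal);

            long deadline = System.currentTimeMillis() + 60_000L; // illustrative overall timeout
            while (!closed && System.currentTimeMillis() < deadline) {
                Thread.sleep(250L); // illustrative poll interval
                // This is the call the log shows failing: once the shared DFSClient
                // has been closed, isFileClosed() throws "java.io.IOException:
                // Filesystem closed" instead of answering, so retrying on the same
                // FileSystem instance can never converge.
                closed = dfs.isFileClosed(wal);
            }
        }
    }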
2024-12-09T15:37:08,739 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:37:08,739 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., storeName=68461f97512168f0a84fb8a3ee8a16d8/info, priority=13, startTime=1733758628698; duration=0sec 2024-12-09T15:37:08,739 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T15:37:08,739 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68461f97512168f0a84fb8a3ee8a16d8:info 2024-12-09T15:37:08,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:08,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:09,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:09,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:09,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:09,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:10,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
[Each retry warning below fails with the same java.lang.reflect.InvocationTargetException, caused by java.io.IOException: Filesystem closed, with the identical stack trace shown above.]
2024-12-09T15:37:10,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:10,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:10,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:11,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:11,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:11,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:11,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:12,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:12,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:12,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:12,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:13,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:13,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:13,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:13,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:13,920 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-09T15:37:14,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:14,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:14,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:14,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:15,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:15,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:15,409 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375
2024-12-09T15:37:15,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:15,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:16,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:16,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:16,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:16,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:17,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:17,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:17,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:17,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:18,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:18,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T15:37:18,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:37:18,754 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-12-09T15:37:18,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/0667a0ff422e40b7a9626e0b324dc2b4 is 1080, key is row0213/info:/1733758628699/Put/seqid=0
2024-12-09T15:37:18,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741868_1044 (size=17918)
2024-12-09T15:37:18,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741868_1044 (size=17918)
2024-12-09T15:37:18,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/0667a0ff422e40b7a9626e0b324dc2b4
2024-12-09T15:37:18,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/0667a0ff422e40b7a9626e0b324dc2b4 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/0667a0ff422e40b7a9626e0b324dc2b4
2024-12-09T15:37:18,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/0667a0ff422e40b7a9626e0b324dc2b4, entries=12, sequenceid=286, filesize=17.5 K
2024-12-09T15:37:18,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for 68461f97512168f0a84fb8a3ee8a16d8 in 21ms, sequenceid=286, compaction requested=true
2024-12-09T15:37:18,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8:
2024-12-09T15:37:18,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68461f97512168f0a84fb8a3ee8a16d8:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T15:37:18,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:37:18,776 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T15:37:18,777 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 191714 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T15:37:18,777 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1541): 68461f97512168f0a84fb8a3ee8a16d8/info is initiating minor compaction (all files)
2024-12-09T15:37:18,777 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 68461f97512168f0a84fb8a3ee8a16d8/info in TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:37:18,777 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/76546390f0ca40cbbbf5032df30af196, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/442aae658fbc4839b081d610eae93b97, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/0667a0ff422e40b7a9626e0b324dc2b4] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp, totalSize=187.2 K
2024-12-09T15:37:18,777 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 76546390f0ca40cbbbf5032df30af196, keycount=133, bloomtype=ROW, size=145.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733758567783
2024-12-09T15:37:18,777 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 442aae658fbc4839b081d610eae93b97, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733758628671
2024-12-09T15:37:18,778 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0667a0ff422e40b7a9626e0b324dc2b4, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733758628699
2024-12-09T15:37:18,790 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68461f97512168f0a84fb8a3ee8a16d8#info#compaction#85 average throughput is 55.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T15:37:18,790 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/b74947a002d0485db01cee47d66167a8 is 1080, key is row0062/info:/1733758567783/Put/seqid=0
2024-12-09T15:37:18,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741869_1045 (size=181864)
2024-12-09T15:37:18,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741869_1045 (size=181864)
2024-12-09T15:37:18,799 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/b74947a002d0485db01cee47d66167a8 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/b74947a002d0485db01cee47d66167a8
2024-12-09T15:37:18,804 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 68461f97512168f0a84fb8a3ee8a16d8/info of 68461f97512168f0a84fb8a3ee8a16d8 into b74947a002d0485db01cee47d66167a8(size=177.6 K), total size for store is 177.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T15:37:18,804 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 68461f97512168f0a84fb8a3ee8a16d8:
2024-12-09T15:37:18,804 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., storeName=68461f97512168f0a84fb8a3ee8a16d8/info, priority=13, startTime=1733758638776; duration=0sec
2024-12-09T15:37:18,804 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:37:18,804 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68461f97512168f0a84fb8a3ee8a16d8:info
2024-12-09T15:37:18,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:18,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:19,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:19,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:19,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:19,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:20,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:20,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:20,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:37:20,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T15:37:20,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/8774afb4cb76447a8965cc23073c63cd is 1080, key is row0225/info:/1733758640756/Put/seqid=0
2024-12-09T15:37:20,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741870_1046 (size=12523)
2024-12-09T15:37:20,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741870_1046 (size=12523)
2024-12-09T15:37:20,789 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/8774afb4cb76447a8965cc23073c63cd
2024-12-09T15:37:20,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/8774afb4cb76447a8965cc23073c63cd as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8774afb4cb76447a8965cc23073c63cd
2024-12-09T15:37:20,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-09T15:37:20,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:46776 deadline: 1733758650798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994
2024-12-09T15:37:20,799 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122, the old value is region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T15:37:20,799 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68461f97512168f0a84fb8a3ee8a16d8, server=3651d8ead0f3,45989,1733758543994
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T15:37:20,799 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., hostname=3651d8ead0f3,45989,1733758543994, seqNum=122 because the exception is null or not the one we care about
2024-12-09T15:37:20,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8774afb4cb76447a8965cc23073c63cd, entries=7, sequenceid=297, filesize=12.2 K
2024-12-09T15:37:20,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 68461f97512168f0a84fb8a3ee8a16d8 in 35ms, sequenceid=297, compaction requested=false
2024-12-09T15:37:20,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8:
2024-12-09T15:37:20,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:20,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:21,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:21,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:21,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:21,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:22,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:22,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:22,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:22,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:23,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:23,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:23,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:23,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:24,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:24,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:24,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:24,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:25,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:25,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:25,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:25,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:26,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:26,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:26,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:26,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:27,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:27,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:27,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:27,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:28,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:28,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:28,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:28,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:29,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:29,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:29,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:29,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:30,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:30,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:30,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:30,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45989 {}] regionserver.HRegion(8855): Flush requested on 68461f97512168f0a84fb8a3ee8a16d8 2024-12-09T15:37:30,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T15:37:30,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/d657dd41956443549d0fa97befee845c is 1080, key is row0232/info:/1733758640766/Put/seqid=0 2024-12-09T15:37:30,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-09T15:37:30,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741871_1047 (size=29807)
2024-12-09T15:37:30,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741871_1047 (size=29807)
2024-12-09T15:37:30,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/d657dd41956443549d0fa97befee845c
2024-12-09T15:37:30,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/d657dd41956443549d0fa97befee845c as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/d657dd41956443549d0fa97befee845c
2024-12-09T15:37:30,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/d657dd41956443549d0fa97befee845c, entries=23, sequenceid=323, filesize=29.1 K
2024-12-09T15:37:30,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 68461f97512168f0a84fb8a3ee8a16d8 in 23ms, sequenceid=323, compaction requested=true
2024-12-09T15:37:30,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8:
2024-12-09T15:37:30,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68461f97512168f0a84fb8a3ee8a16d8:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T15:37:30,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:37:30,907 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T15:37:30,908 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 224194 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T15:37:30,908 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1541): 68461f97512168f0a84fb8a3ee8a16d8/info is initiating minor compaction (all files)
2024-12-09T15:37:30,908 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 68461f97512168f0a84fb8a3ee8a16d8/info in TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:37:30,908 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/b74947a002d0485db01cee47d66167a8, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8774afb4cb76447a8965cc23073c63cd, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/d657dd41956443549d0fa97befee845c] into tmpdir=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp, totalSize=218.9 K
2024-12-09T15:37:30,909 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting b74947a002d0485db01cee47d66167a8, keycount=163, bloomtype=ROW, size=177.6 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733758567783
2024-12-09T15:37:30,909 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8774afb4cb76447a8965cc23073c63cd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733758640756
2024-12-09T15:37:30,909 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] compactions.Compactor(225): Compacting d657dd41956443549d0fa97befee845c, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1733758640766
2024-12-09T15:37:30,920 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68461f97512168f0a84fb8a3ee8a16d8#info#compaction#88 average throughput is 99.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T15:37:30,921 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/95e801d6b0e04242845950d97a8b2a68 is 1080, key is row0062/info:/1733758567783/Put/seqid=0
2024-12-09T15:37:30,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741872_1048 (size=214413)
2024-12-09T15:37:30,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741872_1048 (size=214413)
2024-12-09T15:37:30,935 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/95e801d6b0e04242845950d97a8b2a68 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/95e801d6b0e04242845950d97a8b2a68
2024-12-09T15:37:30,941 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 68461f97512168f0a84fb8a3ee8a16d8/info of 68461f97512168f0a84fb8a3ee8a16d8 into 95e801d6b0e04242845950d97a8b2a68(size=209.4 K), total size for store is 209.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T15:37:30,941 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 68461f97512168f0a84fb8a3ee8a16d8:
2024-12-09T15:37:30,941 INFO [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8., storeName=68461f97512168f0a84fb8a3ee8a16d8/info, priority=13, startTime=1733758650907; duration=0sec
2024-12-09T15:37:30,942 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T15:37:30,942 DEBUG [RS:0;3651d8ead0f3:45989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68461f97512168f0a84fb8a3ee8a16d8:info
2024-12-09T15:37:31,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:31,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:31,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:31,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
2024-12-09T15:37:32,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
2024-12-09T15:37:32,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
2024-12-09T15:37:32,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
2024-12-09T15:37:32,888 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files
2024-12-09T15:37:32,888 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C45989%2C1733758543994.1733758652888
2024-12-09T15:37:32,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:32,894 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,894 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,895 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,895 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,895 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,895 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994/3651d8ead0f3%2C45989%2C1733758543994.1733758544733 with entries=308, filesize=307.10 KB; new WAL /user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994/3651d8ead0f3%2C45989%2C1733758543994.1733758652888 2024-12-09T15:37:32,896 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33241:33241),(127.0.0.1/127.0.0.1:40877:40877)] 2024-12-09T15:37:32,896 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994/3651d8ead0f3%2C45989%2C1733758543994.1733758544733 is not closed yet, will try archiving it next time 2024-12-09T15:37:32,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741833_1009 (size=314476) 2024-12-09T15:37:32,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741833_1009 (size=314476) 2024-12-09T15:37:32,900 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-09T15:37:32,904 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/info/2637c989d3994ec4b89cc306bcdd57dd is 186, key is TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491./info:regioninfo/1733758571736/Put/seqid=0 2024-12-09T15:37:32,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741874_1050 (size=6153) 2024-12-09T15:37:32,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741874_1050 (size=6153) 2024-12-09T15:37:32,910 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), 
to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/info/2637c989d3994ec4b89cc306bcdd57dd 2024-12-09T15:37:32,915 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/.tmp/info/2637c989d3994ec4b89cc306bcdd57dd as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/info/2637c989d3994ec4b89cc306bcdd57dd 2024-12-09T15:37:32,920 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/info/2637c989d3994ec4b89cc306bcdd57dd, entries=5, sequenceid=21, filesize=6.0 K 2024-12-09T15:37:32,921 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-12-09T15:37:32,921 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T15:37:32,921 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 04c086fc533fb33cebb40916dc3df491: 2024-12-09T15:37:32,921 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 68461f97512168f0a84fb8a3ee8a16d8 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-12-09T15:37:32,925 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/e5ed0ce6d5fb4eaf8bccb595136f9865 is 1080, key is row0255/info:/1733758650885/Put/seqid=0 2024-12-09T15:37:32,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741875_1051 (size=7116) 2024-12-09T15:37:32,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741875_1051 (size=7116) 2024-12-09T15:37:32,930 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/e5ed0ce6d5fb4eaf8bccb595136f9865 2024-12-09T15:37:32,935 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/.tmp/info/e5ed0ce6d5fb4eaf8bccb595136f9865 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/e5ed0ce6d5fb4eaf8bccb595136f9865 2024-12-09T15:37:32,939 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/e5ed0ce6d5fb4eaf8bccb595136f9865, entries=2, sequenceid=329, filesize=6.9 K 2024-12-09T15:37:32,940 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize 
~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 68461f97512168f0a84fb8a3ee8a16d8 in 19ms, sequenceid=329, compaction requested=false 2024-12-09T15:37:32,940 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 68461f97512168f0a84fb8a3ee8a16d8: 2024-12-09T15:37:32,940 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C45989%2C1733758543994.1733758652940 2024-12-09T15:37:32,945 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,945 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,945 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,945 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,946 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:32,946 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994/3651d8ead0f3%2C45989%2C1733758543994.1733758652888 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994/3651d8ead0f3%2C45989%2C1733758543994.1733758652940 2024-12-09T15:37:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741873_1049 (size=731) 2024-12-09T15:37:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741873_1049 (size=731) 2024-12-09T15:37:32,949 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33241:33241),(127.0.0.1/127.0.0.1:40877:40877)] 2024-12-09T15:37:32,950 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994/3651d8ead0f3%2C45989%2C1733758543994.1733758544733 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/oldWALs/3651d8ead0f3%2C45989%2C1733758543994.1733758544733 2024-12-09T15:37:32,950 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T15:37:32,950 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T15:37:32,951 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
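
The two flushes above follow a write-then-commit pattern: the memstore is flushed to a file under the region's .tmp directory, and the "Committing ... as ..." step renames it into the column-family directory, so readers only ever see complete store files. Below is a minimal sketch of that idiom against the generic Hadoop FileSystem API; the class, paths, and payload handling are illustrative assumptions, not HBase's actual flush code.

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative write-then-rename commit, mirroring the
    // ".tmp/<file>" -> "<family>/<file>" moves logged above.
    final class TmpCommitSketch {
      static Path flushAndCommit(FileSystem fs, Path regionDir, String family,
          String fileName, byte[] payload) throws IOException {
        Path tmp = new Path(new Path(regionDir, ".tmp"), fileName);
        Path dst = new Path(new Path(regionDir, family), fileName);
        try (FSDataOutputStream out = fs.create(tmp, false)) {
          out.write(payload); // stand-in for the real HFile writer
        }
        // HDFS rename is atomic within one filesystem, so readers never
        // observe a half-written store file in the family directory.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("Failed to commit " + tmp + " to " + dst);
        }
        return dst;
      }
    }
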
2024-12-09T15:37:32,951 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:37:32,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:32,951 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T15:37:32,951 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/WALs/3651d8ead0f3,45989,1733758543994/3651d8ead0f3%2C45989%2C1733758543994.1733758652888 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/oldWALs/3651d8ead0f3%2C45989%2C1733758543994.1733758652888
2024-12-09T15:37:32,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:32,951 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T15:37:32,951 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2118415407, stopped=false
2024-12-09T15:37:32,951 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3651d8ead0f3,38049,1733758543939
2024-12-09T15:37:32,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:37:32,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:37:32,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:37:32,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:37:32,953 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:37:32,953 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
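
The WAL-Archive-0 entries above move fully rolled WALs from the server's WALs directory into the shared oldWALs directory, keeping only the file name. A hedged sketch of that move with the Hadoop FileSystem API; the helper name and error handling are assumptions.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative WAL archival: the destination keeps only the file name
    // under oldWALs, matching the source/target paths logged above.
    final class WalArchiveSketch {
      static void archiveWal(FileSystem fs, Path wal, Path oldWalsDir)
          throws IOException {
        Path dst = new Path(oldWalsDir, wal.getName());
        if (!fs.rename(wal, dst)) {
          throw new IOException("Could not archive " + wal + " to " + dst);
        }
      }
    }
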
2024-12-09T15:37:32,954 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:37:32,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:32,954 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3651d8ead0f3,45989,1733758543994' *****
2024-12-09T15:37:32,954 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T15:37:32,954 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:37:32,954 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:37:32,954 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T15:37:32,955 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(3091): Received CLOSE for 04c086fc533fb33cebb40916dc3df491
2024-12-09T15:37:32,955 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 04c086fc533fb33cebb40916dc3df491, disabling compactions & flushes
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(3091): Received CLOSE for 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(959): stopping server 3651d8ead0f3,45989,1733758543994
2024-12-09T15:37:32,955 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:37:32,955 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
2024-12-09T15:37:32,955 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491. after waiting 0 ms
2024-12-09T15:37:32,955 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3651d8ead0f3:45989.
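
The ZooKeeper events above carry the shutdown signal: deleting /hbase/running produces a NodeDeleted event at every watcher, and each participant immediately re-arms a watch on the now-absent znode ("Set watcher on znode that does not yet exist"). A minimal sketch with the plain ZooKeeper client, assuming exists() is used to set the watch; the reaction inside process() is an assumption.

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Illustrative shutdown watch: exists() sets a watch whether or not
    // /hbase/running is present, so its deletion is observed and the
    // watch can be re-armed, as the ZKUtil lines above show.
    final class RunningNodeWatch implements Watcher {
      private final ZooKeeper zk;

      RunningNodeWatch(ZooKeeper zk) { this.zk = zk; }

      void arm() throws KeeperException, InterruptedException {
        zk.exists("/hbase/running", this); // watch is set even if absent
      }

      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted) {
          // Cluster shutdown was requested (assumed reaction).
        }
        try {
          arm(); // one-shot watches must be re-registered
        } catch (KeeperException | InterruptedException ignored) {
          // a real implementation would retry or surface this
        }
      }
    }
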
2024-12-09T15:37:32,955 DEBUG [RS:0;3651d8ead0f3:45989 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:37:32,955 DEBUG [RS:0;3651d8ead0f3:45989 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T15:37:32,955 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T15:37:32,956 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d->hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/8cd328a9ef254adaaa3d4865125c98bc-bottom] to archive
2024-12-09T15:37:32,957 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-09T15:37:32,958 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:37:32,958 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close
2024-12-09T15:37:32,958 DEBUG [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 04c086fc533fb33cebb40916dc3df491=TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491., 68461f97512168f0a84fb8a3ee8a16d8=TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.}
2024-12-09T15:37:32,958 DEBUG [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(1351): Waiting on 04c086fc533fb33cebb40916dc3df491, 1588230740, 68461f97512168f0a84fb8a3ee8a16d8
2024-12-09T15:37:32,958 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3651d8ead0f3:38049 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-12-09T15:37:32,958 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T15:37:32,958 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.-1 {}] regionserver.HStore(2414): Failed to report archival of files: []
2024-12-09T15:37:32,958 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T15:37:32,959 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T15:37:32,959 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T15:37:32,959 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T15:37:32,964 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/04c086fc533fb33cebb40916dc3df491/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=121
2024-12-09T15:37:32,965 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 04c086fc533fb33cebb40916dc3df491: Waiting for close lock at 1733758652955Running coprocessor pre-close hooks at 1733758652955Disabling compacts and flushes for region at 1733758652955Disabling writes for close at 1733758652955Writing region close event to WAL at 1733758652960 (+5 ms)Running coprocessor post-close hooks at 1733758652964 (+4 ms)Closed at 1733758652964
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733758569948.04c086fc533fb33cebb40916dc3df491.
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 68461f97512168f0a84fb8a3ee8a16d8, disabling compactions & flushes
2024-12-09T15:37:32,965 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8. after waiting 0 ms
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:37:32,965 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733758652958Running coprocessor pre-close hooks at 1733758652958Disabling compacts and flushes for region at 1733758652958Disabling writes for close at 1733758652959 (+1 ms)Writing region close event to WAL at 1733758652962 (+3 ms)Running coprocessor post-close hooks at 1733758652965 (+3 ms)Closed at 1733758652965
2024-12-09T15:37:32,965 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T15:37:32,965 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d->hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/e4cb41ff21765f272c4aaa91a5fdf38d/info/8cd328a9ef254adaaa3d4865125c98bc-top, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-7cac9a1e9bd6411e82edfa02d2cfd64e, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/cc977974ce6942c6aa359cd481d045a3, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-198372afb8c64f48b36814b1e29e8a53, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2292f657e3a848449aa7cc907f116f3e, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/911c81e595964a94a40125c851379367, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/863a6bd60c9845939e6401a5f1fd822a, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/6a31273e45df4a29a578321502bcc4b9, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/96bc4f44f22d442d8ae983014d0d5ef6, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f3cfbe35e6f446ec91eac04d17100921, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/db6383251f0e469e9fa88bd0561c6eb1, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f75aed28d1024ee7a4804053f8ea2e2e, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/30c3ceed0e004ab6bf58c440455684f6, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2af96809d7c148fea9b84a03814116ea, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/76546390f0ca40cbbbf5032df30af196, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/55c41755dee74474ba5ce5fcd9f3b6d4, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/442aae658fbc4839b081d610eae93b97, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/b74947a002d0485db01cee47d66167a8, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/0667a0ff422e40b7a9626e0b324dc2b4, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8774afb4cb76447a8965cc23073c63cd, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/d657dd41956443549d0fa97befee845c] to archive
2024-12-09T15:37:32,966 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
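
The store-file moves that follow map each path under .../data/... to the same relative path under .../archive/data/..., preserving the namespace/table/region/family layout. Below is a sketch of that mapping; deriving the destination by re-rooting the relative path under "archive" is an assumption based only on the source/destination pairs visible in this log.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative mapping: <root>/data/<ns>/<table>/<region>/<cf>/<file>
    // -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<file>, matching
    // the source/destination pairs logged below.
    final class ArchivePathSketch {
      static Path toArchive(Path rootDir, Path storeFile) {
        String root = rootDir.toUri().getPath();
        String file = storeFile.toUri().getPath();
        String relative = file.substring(root.length() + 1); // "data/..."
        return new Path(new Path(rootDir, "archive"), relative);
      }

      static void archive(FileSystem fs, Path rootDir, Path storeFile)
          throws IOException {
        Path dst = toArchive(rootDir, storeFile);
        fs.mkdirs(dst.getParent()); // ensure the archive family dir exists
        if (!fs.rename(storeFile, dst)) {
          throw new IOException("Archive failed for " + storeFile);
        }
      }
    }
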
2024-12-09T15:37:32,968 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8cd328a9ef254adaaa3d4865125c98bc.e4cb41ff21765f272c4aaa91a5fdf38d
2024-12-09T15:37:32,969 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-7cac9a1e9bd6411e82edfa02d2cfd64e to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-7cac9a1e9bd6411e82edfa02d2cfd64e
2024-12-09T15:37:32,970 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/cc977974ce6942c6aa359cd481d045a3 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/cc977974ce6942c6aa359cd481d045a3
2024-12-09T15:37:32,971 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-198372afb8c64f48b36814b1e29e8a53 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/TestLogRolling-testLogRolling=e4cb41ff21765f272c4aaa91a5fdf38d-198372afb8c64f48b36814b1e29e8a53
2024-12-09T15:37:32,972 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2292f657e3a848449aa7cc907f116f3e to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2292f657e3a848449aa7cc907f116f3e
2024-12-09T15:37:32,973 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/911c81e595964a94a40125c851379367 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/911c81e595964a94a40125c851379367
2024-12-09T15:37:32,974 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/863a6bd60c9845939e6401a5f1fd822a to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/863a6bd60c9845939e6401a5f1fd822a
2024-12-09T15:37:32,975 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/6a31273e45df4a29a578321502bcc4b9 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/6a31273e45df4a29a578321502bcc4b9
2024-12-09T15:37:32,976 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/96bc4f44f22d442d8ae983014d0d5ef6 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/96bc4f44f22d442d8ae983014d0d5ef6
2024-12-09T15:37:32,977 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f3cfbe35e6f446ec91eac04d17100921 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f3cfbe35e6f446ec91eac04d17100921
2024-12-09T15:37:32,978 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/db6383251f0e469e9fa88bd0561c6eb1 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/db6383251f0e469e9fa88bd0561c6eb1
2024-12-09T15:37:32,979 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f75aed28d1024ee7a4804053f8ea2e2e to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/f75aed28d1024ee7a4804053f8ea2e2e
2024-12-09T15:37:32,980 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/30c3ceed0e004ab6bf58c440455684f6 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/30c3ceed0e004ab6bf58c440455684f6
2024-12-09T15:37:32,981 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2af96809d7c148fea9b84a03814116ea to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/2af96809d7c148fea9b84a03814116ea
2024-12-09T15:37:32,982 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/76546390f0ca40cbbbf5032df30af196 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/76546390f0ca40cbbbf5032df30af196
2024-12-09T15:37:32,983 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/55c41755dee74474ba5ce5fcd9f3b6d4 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/55c41755dee74474ba5ce5fcd9f3b6d4
2024-12-09T15:37:32,984 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/442aae658fbc4839b081d610eae93b97 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/442aae658fbc4839b081d610eae93b97
2024-12-09T15:37:32,985 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/b74947a002d0485db01cee47d66167a8 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/b74947a002d0485db01cee47d66167a8
2024-12-09T15:37:32,986 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/0667a0ff422e40b7a9626e0b324dc2b4 to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/0667a0ff422e40b7a9626e0b324dc2b4
2024-12-09T15:37:32,987 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8774afb4cb76447a8965cc23073c63cd to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/8774afb4cb76447a8965cc23073c63cd
2024-12-09T15:37:32,988 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/d657dd41956443549d0fa97befee845c to hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/archive/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/info/d657dd41956443549d0fa97befee845c
2024-12-09T15:37:32,988 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cc977974ce6942c6aa359cd481d045a3=40830, 2292f657e3a848449aa7cc907f116f3e=12513, 911c81e595964a94a40125c851379367=67947, 863a6bd60c9845939e6401a5f1fd822a=24394, 6a31273e45df4a29a578321502bcc4b9=15750, 96bc4f44f22d442d8ae983014d0d5ef6=100466, f3cfbe35e6f446ec91eac04d17100921=26550, db6383251f0e469e9fa88bd0561c6eb1=15750, f75aed28d1024ee7a4804053f8ea2e2e=118898, 30c3ceed0e004ab6bf58c440455684f6=12516, 2af96809d7c148fea9b84a03814116ea=27628, 76546390f0ca40cbbbf5032df30af196=149389, 55c41755dee74474ba5ce5fcd9f3b6d4=12516, 442aae658fbc4839b081d610eae93b97=24407, b74947a002d0485db01cee47d66167a8=181864, 0667a0ff422e40b7a9626e0b324dc2b4=17918, 8774afb4cb76447a8965cc23073c63cd=12523, d657dd41956443549d0fa97befee845c=29807]
2024-12-09T15:37:32,992 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/data/default/TestLogRolling-testLogRolling/68461f97512168f0a84fb8a3ee8a16d8/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=121
2024-12-09T15:37:32,992 INFO [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:37:32,993 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 68461f97512168f0a84fb8a3ee8a16d8: Waiting for close lock at 1733758652965Running coprocessor pre-close hooks at 1733758652965Disabling compacts and flushes for region at 1733758652965Disabling writes for close at 1733758652965Writing region close event to WAL at 1733758652989 (+24 ms)Running coprocessor post-close hooks at 1733758652992 (+3 ms)Closed at 1733758652992
2024-12-09T15:37:32,993 DEBUG [RS_CLOSE_REGION-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733758569948.68461f97512168f0a84fb8a3ee8a16d8.
2024-12-09T15:37:33,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:37:33,159 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(976): stopping server 3651d8ead0f3,45989,1733758543994; all regions closed.
2024-12-09T15:37:33,159 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,159 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,159 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,160 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,160 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741834_1010 (size=8107)
2024-12-09T15:37:33,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741834_1010 (size=8107)
2024-12-09T15:37:33,164 DEBUG [RS:0;3651d8ead0f3:45989 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/oldWALs
2024-12-09T15:37:33,164 INFO [RS:0;3651d8ead0f3:45989 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C45989%2C1733758543994.meta:.meta(num 1733758545163)
2024-12-09T15:37:33,165 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,165 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,165 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,165 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,165 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:33,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741876_1052 (size=778)
2024-12-09T15:37:33,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741876_1052 (size=778)
2024-12-09T15:37:33,169 DEBUG [RS:0;3651d8ead0f3:45989 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/oldWALs
2024-12-09T15:37:33,169 INFO [RS:0;3651d8ead0f3:45989 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C45989%2C1733758543994:(num 1733758652940)
2024-12-09T15:37:33,169 DEBUG [RS:0;3651d8ead0f3:45989 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:33,169 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:37:33,169 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:37:33,169 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.ChoreService(370): Chore service for: regionserver/3651d8ead0f3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-09T15:37:33,170 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:37:33,170 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
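
The recurring "Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" WARNs (at 15:37:32,888, 15:37:32,894, and 15:37:33,111 above) share one shape: isFileClosed is invoked via Method.invoke, so an IOException thrown inside the probed method surfaces wrapped in InvocationTargetException. A sketch of that reflect-and-unwrap pattern follows; the helper class and fallback behavior are illustrative assumptions, not the RecoverLeaseFSUtils source.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative reflective probe: isFileClosed is looked up at runtime
    // (not every FileSystem declares it), and a failure inside the probed
    // method arrives wrapped in InvocationTargetException, which is
    // exactly the shape of the WARN stack traces above.
    final class IsFileClosedProbe {
      static boolean isFileClosed(FileSystem fs, Path p) throws IOException {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // probe unavailable; caller falls back to waiting
        } catch (InvocationTargetException e) {
          Throwable cause = e.getCause(); // e.g. "Filesystem closed"
          throw cause instanceof IOException
              ? (IOException) cause
              : new IOException(cause);
        }
      }
    }
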
2024-12-09T15:37:33,170 INFO [RS:0;3651d8ead0f3:45989 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45989 2024-12-09T15:37:33,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3651d8ead0f3,45989,1733758543994 2024-12-09T15:37:33,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T15:37:33,172 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T15:37:33,174 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3651d8ead0f3,45989,1733758543994] 2024-12-09T15:37:33,176 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3651d8ead0f3,45989,1733758543994 already deleted, retry=false 2024-12-09T15:37:33,176 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3651d8ead0f3,45989,1733758543994 expired; onlineServers=0 2024-12-09T15:37:33,176 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3651d8ead0f3,38049,1733758543939' ***** 2024-12-09T15:37:33,176 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T15:37:33,176 INFO [M:0;3651d8ead0f3:38049 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T15:37:33,176 INFO [M:0;3651d8ead0f3:38049 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T15:37:33,176 DEBUG [M:0;3651d8ead0f3:38049 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T15:37:33,176 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
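The two ZKWatcher lines just above show how the master learns the regionserver is gone: deleting the ephemeral znode /hbase/rs/3651d8ead0f3,45989,1733758543994 fires a NodeDeleted event, and /hbase/rs fires NodeChildrenChanged, after which RegionServerTracker processes the expiration. A bare-bones watcher that distinguishes the same event types might look roughly like this (a sketch assuming the ZooKeeper client jar on the classpath; the class name and println handling are illustrative, not HBase's actual ZKWatcher):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

public class RsEphemeralWatcher implements Watcher {
    @Override
    public void process(WatchedEvent event) {
        switch (event.getType()) {
            case NodeDeleted:
                // e.g. /hbase/rs/3651d8ead0f3,45989,1733758543994:
                // the ephemeral node vanished, so the server is treated as expired.
                System.out.println("RegionServer znode deleted: " + event.getPath());
                break;
            case NodeChildrenChanged:
                // e.g. /hbase/rs: membership changed, rescan the children.
                System.out.println("Children changed under: " + event.getPath());
                break;
            default:
                // Session-level events arrive with type=None and a null path;
                // the "type=None, state=Closed" lines later in this log are
                // exactly those, emitted as the ZK connections shut down.
                break;
        }
    }
}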
2024-12-09T15:37:33,176 DEBUG [M:0;3651d8ead0f3:38049 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T15:37:33,176 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758544523 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758544523,5,FailOnTimeoutGroup] 2024-12-09T15:37:33,176 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758544523 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758544523,5,FailOnTimeoutGroup] 2024-12-09T15:37:33,176 INFO [M:0;3651d8ead0f3:38049 {}] hbase.ChoreService(370): Chore service for: master/3651d8ead0f3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T15:37:33,176 INFO [M:0;3651d8ead0f3:38049 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T15:37:33,176 DEBUG [M:0;3651d8ead0f3:38049 {}] master.HMaster(1795): Stopping service threads 2024-12-09T15:37:33,177 INFO [M:0;3651d8ead0f3:38049 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T15:37:33,177 INFO [M:0;3651d8ead0f3:38049 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T15:37:33,177 INFO [M:0;3651d8ead0f3:38049 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T15:37:33,177 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T15:37:33,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T15:37:33,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:33,185 DEBUG [M:0;3651d8ead0f3:38049 {}] zookeeper.ZKUtil(347): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T15:37:33,185 WARN [M:0;3651d8ead0f3:38049 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T15:37:33,185 INFO [M:0;3651d8ead0f3:38049 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/.lastflushedseqids 2024-12-09T15:37:33,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741877_1053 (size=228) 2024-12-09T15:37:33,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741877_1053 (size=228) 2024-12-09T15:37:33,191 INFO [M:0;3651d8ead0f3:38049 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T15:37:33,191 INFO [M:0;3651d8ead0f3:38049 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T15:37:33,191 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T15:37:33,191 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:37:33,191 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:37:33,191 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T15:37:33,191 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:37:33,191 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-12-09T15:37:33,208 DEBUG [M:0;3651d8ead0f3:38049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/002337c4862d4ddc9beee6516088ba16 is 82, key is hbase:meta,,1/info:regioninfo/1733758545215/Put/seqid=0 2024-12-09T15:37:33,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741878_1054 (size=5672) 2024-12-09T15:37:33,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741878_1054 (size=5672) 2024-12-09T15:37:33,214 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/002337c4862d4ddc9beee6516088ba16 2024-12-09T15:37:33,244 DEBUG [M:0;3651d8ead0f3:38049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5a7e8968cfa4a9a8dbcaf9e87e33680 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733758545869/Put/seqid=0 2024-12-09T15:37:33,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741879_1055 (size=7091) 2024-12-09T15:37:33,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741879_1055 (size=7091) 2024-12-09T15:37:33,253 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5a7e8968cfa4a9a8dbcaf9e87e33680 2024-12-09T15:37:33,259 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d5a7e8968cfa4a9a8dbcaf9e87e33680 2024-12-09T15:37:33,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T15:37:33,275 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45989-0x1012bd378510001, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T15:37:33,278 INFO [RS:0;3651d8ead0f3:45989 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T15:37:33,278 INFO [RS:0;3651d8ead0f3:45989 {}] regionserver.HRegionServer(1031): Exiting; stopping=3651d8ead0f3,45989,1733758543994; zookeeper connection closed. 2024-12-09T15:37:33,280 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4a073ec3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4a073ec3 2024-12-09T15:37:33,280 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T15:37:33,285 DEBUG [M:0;3651d8ead0f3:38049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3e55c2f80f33408d82558e433993828d is 69, key is 3651d8ead0f3,45989,1733758543994/rs:state/1733758544577/Put/seqid=0 2024-12-09T15:37:33,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741880_1056 (size=5156) 2024-12-09T15:37:33,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741880_1056 (size=5156) 2024-12-09T15:37:33,306 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3e55c2f80f33408d82558e433993828d 2024-12-09T15:37:33,332 DEBUG [M:0;3651d8ead0f3:38049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/15d213eaea6141f2baf505b950949021 is 52, key is load_balancer_on/state:d/1733758545357/Put/seqid=0 2024-12-09T15:37:33,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741881_1057 (size=5056) 2024-12-09T15:37:33,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741881_1057 (size=5056) 2024-12-09T15:37:33,340 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/15d213eaea6141f2baf505b950949021 2024-12-09T15:37:33,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:33,350 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/002337c4862d4ddc9beee6516088ba16 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/002337c4862d4ddc9beee6516088ba16 2024-12-09T15:37:33,355 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/002337c4862d4ddc9beee6516088ba16, entries=8, sequenceid=125, filesize=5.5 K 2024-12-09T15:37:33,356 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5a7e8968cfa4a9a8dbcaf9e87e33680 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d5a7e8968cfa4a9a8dbcaf9e87e33680 2024-12-09T15:37:33,361 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d5a7e8968cfa4a9a8dbcaf9e87e33680 2024-12-09T15:37:33,361 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d5a7e8968cfa4a9a8dbcaf9e87e33680, entries=13, 
sequenceid=125, filesize=6.9 K 2024-12-09T15:37:33,362 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3e55c2f80f33408d82558e433993828d as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3e55c2f80f33408d82558e433993828d 2024-12-09T15:37:33,366 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3e55c2f80f33408d82558e433993828d, entries=1, sequenceid=125, filesize=5.0 K 2024-12-09T15:37:33,367 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/15d213eaea6141f2baf505b950949021 as hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/15d213eaea6141f2baf505b950949021 2024-12-09T15:37:33,371 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35067/user/jenkins/test-data/2883ea0d-9f1f-a2ed-05d0-7c74abd427e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/15d213eaea6141f2baf505b950949021, entries=1, sequenceid=125, filesize=4.9 K 2024-12-09T15:37:33,372 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 181ms, sequenceid=125, compaction requested=false 2024-12-09T15:37:33,379 INFO [M:0;3651d8ead0f3:38049 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:37:33,379 DEBUG [M:0;3651d8ead0f3:38049 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758653191Disabling compacts and flushes for region at 1733758653191Disabling writes for close at 1733758653191Obtaining lock to block concurrent updates at 1733758653191Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733758653191Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1733758653192 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733758653192Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733758653192Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733758653208 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733758653208Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733758653220 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733758653243 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733758653243Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733758653259 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733758653285 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733758653285Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733758653312 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733758653332 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733758653332Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f9f2ed8: reopening flushed file at 1733758653345 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ce5f757: reopening flushed file at 1733758653355 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49834e94: reopening flushed file at 1733758653361 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24622cd5: reopening flushed file at 1733758653366 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 181ms, sequenceid=125, compaction requested=false at 1733758653372 (+6 ms)Writing region close event to WAL at 1733758653379 (+7 ms)Closed at 1733758653379 2024-12-09T15:37:33,384 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:33,385 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:33,385 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:33,390 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:33,390 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T15:37:33,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741830_1006 (size=61332) 2024-12-09T15:37:33,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741830_1006 (size=61332) 2024-12-09T15:37:33,796 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T15:37:33,796 INFO [M:0;3651d8ead0f3:38049 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T15:37:33,796 INFO [M:0;3651d8ead0f3:38049 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38049 2024-12-09T15:37:33,797 INFO [M:0;3651d8ead0f3:38049 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T15:37:33,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:33,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:33,899 INFO [M:0;3651d8ead0f3:38049 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T15:37:33,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T15:37:33,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38049-0x1012bd378510000, quorum=127.0.0.1:51267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T15:37:33,912 WARN [BP-185995437-172.17.0.3-1733758542821 heartbeating to localhost/127.0.0.1:35067 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-185995437-172.17.0.3-1733758542821 (Datanode Uuid de1e8045-99e5-4924-8537-610aeadc82c1) service to localhost/127.0.0.1:35067 2024-12-09T15:37:33,913 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/data/data3/current/BP-185995437-172.17.0.3-1733758542821 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:37:33,913 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/data/data4/current/BP-185995437-172.17.0.3-1733758542821 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:37:33,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5eab25ce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:37:33,919 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5773e0ea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T15:37:33,919 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T15:37:33,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70aed17c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T15:37:33,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@208716aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.log.dir/,STOPPED} 2024-12-09T15:37:33,926 WARN [Time-limited test {}] 
datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T15:37:33,936 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9530f47{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:37:33,936 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59fd7f75{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T15:37:33,936 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T15:37:33,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1724ca70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T15:37:33,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c08109c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.log.dir/,STOPPED} 2024-12-09T15:37:33,947 WARN [BP-185995437-172.17.0.3-1733758542821 heartbeating to localhost/127.0.0.1:35067 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T15:37:33,947 WARN [BP-185995437-172.17.0.3-1733758542821 heartbeating to localhost/127.0.0.1:35067 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-185995437-172.17.0.3-1733758542821 (Datanode Uuid 4ae49c9f-0a7c-499a-a990-93fd117cd1ce) service to localhost/127.0.0.1:35067 2024-12-09T15:37:33,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/data/data1/current/BP-185995437-172.17.0.3-1733758542821 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:37:33,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/cluster_d00fb838-a659-ed87-493c-b1209c801121/data/data2/current/BP-185995437-172.17.0.3-1733758542821 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T15:37:33,949 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T15:37:33,952 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
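Stepping back, the master-store flush recorded a few stanzas earlier follows the classic write-then-rename commit: each column family's flushed file is first written under the region's .tmp directory (the DefaultStoreFlusher lines), and only then does HRegionFileSystem "commit" it by moving it into the live store directory, so no reader ever observes a half-written file. A local-filesystem sketch of the same pattern, with the directory layout and the hash-style filename borrowed from the log purely for illustration:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommit {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store");
        Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

        // 1. Flush: write the new file where readers do not look.
        Path tmpFile = tmpDir.resolve("002337c4862d4ddc9beee6516088ba16");
        Files.write(tmpFile, "flushed cells".getBytes());

        // 2. Commit: move it into the live store directory in one step
        //    (atomic here because source and target share a filesystem).
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("Committed " + committed);
    }
}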
2024-12-09T15:37:33,952 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T15:37:33,963 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@284a8a01{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T15:37:33,967 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26d44036{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T15:37:33,967 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T15:37:33,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1520cb76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T15:37:33,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41c3a275{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.log.dir/,STOPPED} 2024-12-09T15:37:33,989 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T15:37:34,032 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T15:37:34,048 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 208) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35067 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35067 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35067 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35067 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35067 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/3651d8ead0f3:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:35067 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35067 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35067 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35067 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=506 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=534 (was 801), ProcessCount=11 (was 11), AvailableMemoryMB=5369 (was 4776) - AvailableMemoryMB LEAK? 
- 2024-12-09T15:37:34,062 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=506, MaxFileDescriptor=1048576, SystemLoadAverage=534, ProcessCount=11, AvailableMemoryMB=5366 2024-12-09T15:37:34,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T15:37:34,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.log.dir so I do NOT create it in target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1 2024-12-09T15:37:34,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/64b39c9c-938b-2919-e8ee-22d4a41a1daf/hadoop.tmp.dir so I do NOT create it in target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1 2024-12-09T15:37:34,063 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76, deleteOnExit=true 2024-12-09T15:37:34,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T15:37:34,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/test.cache.data in system properties and HBase conf 2024-12-09T15:37:34,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T15:37:34,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/hadoop.log.dir in system properties and HBase conf 2024-12-09T15:37:34,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T15:37:34,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T15:37:34,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T15:37:34,064 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T15:37:34,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T15:37:34,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T15:37:34,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T15:37:34,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/nfs.dump.dir in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/java.io.tmpdir in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T15:37:34,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T15:37:34,080 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T15:37:34,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:34,174 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:37:34,179 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:37:34,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:37:34,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:37:34,193 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T15:37:34,194 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:37:34,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61266769{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:37:34,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a47ab0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:37:34,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20339ff2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/java.io.tmpdir/jetty-localhost-41699-hadoop-hdfs-3_4_1-tests_jar-_-any-9901925808552444141/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T15:37:34,338 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@238bf9b3{HTTP/1.1, (http/1.1)}{localhost:41699} 2024-12-09T15:37:34,339 INFO [Time-limited test {}] server.Server(415): Started @379990ms 2024-12-09T15:37:34,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:34,356 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T15:37:34,503 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:37:34,509 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:37:34,517 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:37:34,517 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:37:34,517 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T15:37:34,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ec9da7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:37:34,519 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d45b9b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:37:34,599 INFO [regionserver/3651d8ead0f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T15:37:34,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ab56e91{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/java.io.tmpdir/jetty-localhost-33411-hadoop-hdfs-3_4_1-tests_jar-_-any-12621009281278156371/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:37:34,676 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5eefcd14{HTTP/1.1, (http/1.1)}{localhost:33411} 2024-12-09T15:37:34,676 INFO [Time-limited test {}] server.Server(415): Started @380327ms 2024-12-09T15:37:34,678 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T15:37:34,773 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T15:37:34,785 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T15:37:34,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T15:37:34,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T15:37:34,799 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T15:37:34,803 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20dd8a9c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/hadoop.log.dir/,AVAILABLE} 2024-12-09T15:37:34,803 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59532081{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T15:37:34,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:34,875 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/data/data1/current/BP-293280157-172.17.0.3-1733758654087/current, will proceed with Du for space computation calculation, 2024-12-09T15:37:34,886 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/data/data2/current/BP-293280157-172.17.0.3-1733758654087/current, will proceed with Du for space computation calculation, 2024-12-09T15:37:34,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:34,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@616bab4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/java.io.tmpdir/jetty-localhost-39109-hadoop-hdfs-3_4_1-tests_jar-_-any-2926506477720852372/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T15:37:34,960 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2269c58e{HTTP/1.1, (http/1.1)}{localhost:39109} 2024-12-09T15:37:34,960 INFO [Time-limited test {}] server.Server(415): Started @380612ms 2024-12-09T15:37:34,962 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T15:37:34,975 WARN [Thread-2472 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T15:37:34,986 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4ecebdf0226470d with lease ID 0x18e7d52e139ac7c4: Processing first storage report for DS-773c357b-d80c-4b99-9674-4f6654a5ead3 from datanode DatanodeRegistration(127.0.0.1:38815, datanodeUuid=37c55293-3aa0-4b87-9908-7499147af738, infoPort=38425, infoSecurePort=0, ipcPort=33813, storageInfo=lv=-57;cid=testClusterID;nsid=456449925;c=1733758654087) 2024-12-09T15:37:34,986 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4ecebdf0226470d with lease ID 0x18e7d52e139ac7c4: from storage DS-773c357b-d80c-4b99-9674-4f6654a5ead3 node DatanodeRegistration(127.0.0.1:38815, datanodeUuid=37c55293-3aa0-4b87-9908-7499147af738, infoPort=38425, infoSecurePort=0, ipcPort=33813, storageInfo=lv=-57;cid=testClusterID;nsid=456449925;c=1733758654087), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:37:34,986 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4ecebdf0226470d with lease ID 0x18e7d52e139ac7c4: Processing first storage report for DS-9c03b676-8c02-418d-908e-f56679ada303 from datanode DatanodeRegistration(127.0.0.1:38815, datanodeUuid=37c55293-3aa0-4b87-9908-7499147af738, infoPort=38425, infoSecurePort=0, ipcPort=33813, storageInfo=lv=-57;cid=testClusterID;nsid=456449925;c=1733758654087) 2024-12-09T15:37:34,986 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4ecebdf0226470d with lease ID 0x18e7d52e139ac7c4: from storage DS-9c03b676-8c02-418d-908e-f56679ada303 node DatanodeRegistration(127.0.0.1:38815, datanodeUuid=37c55293-3aa0-4b87-9908-7499147af738, infoPort=38425, infoSecurePort=0, ipcPort=33813, storageInfo=lv=-57;cid=testClusterID;nsid=456449925;c=1733758654087), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:37:35,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:35,130 WARN [Thread-2519 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/data/data3/current/BP-293280157-172.17.0.3-1733758654087/current, will proceed with Du for space computation calculation, 2024-12-09T15:37:35,135 WARN [Thread-2520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/data/data4/current/BP-293280157-172.17.0.3-1733758654087/current, will proceed with Du for space computation calculation, 2024-12-09T15:37:35,165 WARN [Thread-2508 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T15:37:35,169 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x538bd367bdd2326f with lease ID 0x18e7d52e139ac7c5: Processing first storage report for DS-10826b03-fd0e-4a0b-a033-b25f6eefb914 from datanode DatanodeRegistration(127.0.0.1:46335, datanodeUuid=4bc40f71-c804-4fd1-9979-000ddf434d51, infoPort=34845, infoSecurePort=0, ipcPort=34007, storageInfo=lv=-57;cid=testClusterID;nsid=456449925;c=1733758654087) 2024-12-09T15:37:35,169 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x538bd367bdd2326f with lease ID 0x18e7d52e139ac7c5: from storage DS-10826b03-fd0e-4a0b-a033-b25f6eefb914 node DatanodeRegistration(127.0.0.1:46335, datanodeUuid=4bc40f71-c804-4fd1-9979-000ddf434d51, infoPort=34845, infoSecurePort=0, ipcPort=34007, storageInfo=lv=-57;cid=testClusterID;nsid=456449925;c=1733758654087), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:37:35,169 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x538bd367bdd2326f with lease ID 0x18e7d52e139ac7c5: Processing first storage report for DS-12c2ee30-5e45-4cc4-940a-c4e812c00e80 from datanode DatanodeRegistration(127.0.0.1:46335, datanodeUuid=4bc40f71-c804-4fd1-9979-000ddf434d51, infoPort=34845, infoSecurePort=0, ipcPort=34007, storageInfo=lv=-57;cid=testClusterID;nsid=456449925;c=1733758654087) 2024-12-09T15:37:35,169 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x538bd367bdd2326f with lease ID 0x18e7d52e139ac7c5: from storage DS-12c2ee30-5e45-4cc4-940a-c4e812c00e80 node DatanodeRegistration(127.0.0.1:46335, datanodeUuid=4bc40f71-c804-4fd1-9979-000ddf434d51, infoPort=34845, infoSecurePort=0, ipcPort=34007, storageInfo=lv=-57;cid=testClusterID;nsid=456449925;c=1733758654087), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T15:37:35,267 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1 2024-12-09T15:37:35,275 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/zookeeper_0, clientPort=55641, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T15:37:35,276 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55641 2024-12-09T15:37:35,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:37:35,278 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:37:35,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741825_1001 (size=7) 2024-12-09T15:37:35,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741825_1001 (size=7) 2024-12-09T15:37:35,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:35,728 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e with version=8 2024-12-09T15:37:35,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36359/user/jenkins/test-data/a66ee57d-534b-3377-2f49-519d2d0dc648/hbase-staging 2024-12-09T15:37:35,731 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3651d8ead0f3:0 server-side Connection retries=45 2024-12-09T15:37:35,731 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:37:35,731 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T15:37:35,731 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T15:37:35,731 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:37:35,731 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T15:37:35,732 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T15:37:35,732 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T15:37:35,733 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44363 2024-12-09T15:37:35,734 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44363 connecting to ZooKeeper ensemble=127.0.0.1:55641 2024-12-09T15:37:35,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:443630x0, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T15:37:35,770 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44363-0x1012bd52b450000 connected 2024-12-09T15:37:35,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:37:35,823 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:37:35,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:37:35,825 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e, 
hbase.cluster.distributed=false 2024-12-09T15:37:35,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T15:37:35,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44363 2024-12-09T15:37:35,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44363 2024-12-09T15:37:35,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44363 2024-12-09T15:37:35,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44363 2024-12-09T15:37:35,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44363 2024-12-09T15:37:35,846 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3651d8ead0f3:0 server-side Connection retries=45 2024-12-09T15:37:35,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:37:35,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T15:37:35,846 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T15:37:35,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T15:37:35,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T15:37:35,846 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T15:37:35,846 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T15:37:35,847 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42475 2024-12-09T15:37:35,848 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42475 connecting to ZooKeeper ensemble=127.0.0.1:55641 2024-12-09T15:37:35,849 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:37:35,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:37:35,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:35,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424750x0, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T15:37:35,857 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42475-0x1012bd52b450001 connected 2024-12-09T15:37:35,857 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T15:37:35,857 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T15:37:35,858 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T15:37:35,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T15:37:35,860 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T15:37:35,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42475 2024-12-09T15:37:35,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42475 2024-12-09T15:37:35,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42475 2024-12-09T15:37:35,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42475 2024-12-09T15:37:35,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42475 2024-12-09T15:37:35,897 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3651d8ead0f3:44363 2024-12-09T15:37:35,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:35,899 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3651d8ead0f3,44363,1733758655731 2024-12-09T15:37:35,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:37:35,902 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3651d8ead0f3,44363,1733758655731 2024-12-09T15:37:35,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:37:35,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:35,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T15:37:35,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:35,905 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T15:37:35,905 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3651d8ead0f3,44363,1733758655731 from backup master directory 2024-12-09T15:37:35,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3651d8ead0f3,44363,1733758655731 
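The becomeActiveMaster entries around this point follow the standard ZooKeeper leader-election pattern: the master registers an ephemeral znode under /hbase/backup-masters, races to create /hbase/master, and, having won, deletes its backup-masters entry before logging "Registered as active master" just below. The sketch that follows reproduces that pattern with the plain ZooKeeper client; it is illustrative only, not HBase's ActiveMasterManager, with the quorum address, znode paths, and server name copied from the surrounding log entries.

    import org.apache.zookeeper.*;

    public class MasterElectionSketch {
        public static void main(String[] args) throws Exception {
            // Quorum and paths as they appear in the log above.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:55641", 30000, event -> { });
            String serverName = "3651d8ead0f3,44363,1733758655731";
            String backup = "/hbase/backup-masters/" + serverName;

            // 1. Register under backup-masters first; EPHEMERAL means the znode
            //    disappears with the session, so a dead master cannot block election.
            zk.create(backup, serverName.getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE,
                CreateMode.EPHEMERAL);
            try {
                // 2. Race to create /hbase/master; only one contender succeeds.
                zk.create("/hbase/master", serverName.getBytes(),
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                // 3. Won: drop our backup-masters entry, matching "Deleting ZNode for
                //    /hbase/backup-masters/... from backup master directory" above.
                zk.delete(backup, -1);
                System.out.println("Registered as active master=" + serverName);
            } catch (KeeperException.NodeExistsException e) {
                // Lost: stay listed as a backup master and watch /hbase/master.
                zk.exists("/hbase/master", true);
            }
        }
    }
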
2024-12-09T15:37:35,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:37:35,912 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T15:37:35,912 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3651d8ead0f3,44363,1733758655731 2024-12-09T15:37:35,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T15:37:35,925 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(347): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Unable to get data of znode /hbase/backup-masters/3651d8ead0f3,44363,1733758655731 because node does not exist (not an error) 2024-12-09T15:37:35,939 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/hbase.id] with ID: 546a3720-9822-4eba-a694-9e56674f0083 2024-12-09T15:37:35,939 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/.tmp/hbase.id 2024-12-09T15:37:35,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741826_1002 (size=42) 2024-12-09T15:37:35,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741826_1002 (size=42) 2024-12-09T15:37:36,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:36,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T15:37:36,392 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/.tmp/hbase.id]:[hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/hbase.id] 2024-12-09T15:37:36,412 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T15:37:36,412 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T15:37:36,416 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 4ms. 2024-12-09T15:37:36,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:36,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:36,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741827_1003 (size=196) 2024-12-09T15:37:36,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741827_1003 (size=196) 2024-12-09T15:37:36,833 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T15:37:36,834 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T15:37:36,835 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALFactory(196): 
Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:37:36,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741828_1004 (size=1189) 2024-12-09T15:37:36,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741828_1004 (size=1189) 2024-12-09T15:37:36,850 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store 2024-12-09T15:37:36,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:36,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741829_1005 (size=34) 2024-12-09T15:37:36,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741829_1005 (size=34) 2024-12-09T15:37:36,861 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:37:36,861 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T15:37:36,861 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:37:36,861 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:37:36,861 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T15:37:36,861 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T15:37:36,861 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
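The recurring "InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" traces above all share one shape: RecoverLeaseFSUtils reflectively probes DistributedFileSystem.isFileClosed while the mini-cluster's DFSClient has already been shut down, so DFSClient.checkOpen rejects the call. A minimal sketch of that failure mode, assuming only a reachable namenode; the address and path below are placeholders, not values from this run:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:8020"); // placeholder namenode
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    dfs.close(); // stand-in for the test's filesystem teardown
    try {
      dfs.isFileClosed(new Path("/tmp/some-wal")); // placeholder path
    } catch (IOException e) {
      System.out.println(e.getMessage()); // "Filesystem closed", as in the traces
    }
  }
}

The probe runs on the Close-WAL-Writer pool after the test has begun tearing the old mini-cluster down, which is consistent with it surfacing here only as a WARN rather than failing the run.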
2024-12-09T15:37:36,861 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733758656861Disabling compacts and flushes for region at 1733758656861Disabling writes for close at 1733758656861Writing region close event to WAL at 1733758656861Closed at 1733758656861 2024-12-09T15:37:36,862 WARN [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/.initializing 2024-12-09T15:37:36,862 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/WALs/3651d8ead0f3,44363,1733758655731 2024-12-09T15:37:36,865 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C44363%2C1733758655731, suffix=, logDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/WALs/3651d8ead0f3,44363,1733758655731, archiveDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/oldWALs, maxLogs=10 2024-12-09T15:37:36,866 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C44363%2C1733758655731.1733758656865 2024-12-09T15:37:36,875 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/WALs/3651d8ead0f3,44363,1733758655731/3651d8ead0f3%2C44363%2C1733758655731.1733758656865 2024-12-09T15:37:36,882 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34845:34845),(127.0.0.1/127.0.0.1:38425:38425)] 2024-12-09T15:37:36,887 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T15:37:36,887 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:37:36,887 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,887 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T15:37:36,893 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:37:36,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:37:36,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T15:37:36,895 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:37:36,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:37:36,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T15:37:36,896 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:37:36,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:37:36,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,898 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T15:37:36,898 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:37:36,898 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T15:37:36,899 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T15:37:36,899 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,900 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,901 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,901 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,902 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
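Two derived numbers in the surrounding records can be checked with simple arithmetic. The FlushLargeStoresPolicy record just above falls back to memstore flush size divided by the number of column families (134217728 / 4 = 33554432, the "32.0 M" it prints, and the flushSizeLowerBound=33554432 reported shortly after), and the earlier "WAL configuration: blocksize=256 MB, rollsize=128 MB" record matches roll size being block size times a log-roll multiplier of 0.5; the 0.5 multiplier is an assumption about this run's configuration, not something the log states. A quick check:

public class DerivedSizes {
  public static void main(String[] args) {
    // FlushLargeStoresPolicy fallback: flush size / number of families.
    long memstoreFlushSize = 134217728L; // 128 MB, from the log
    int families = 4;                    // info, proc, rs, state
    System.out.println(memstoreFlushSize / families); // 33554432 (32 MB)

    // WAL roll size: block size x multiplier (0.5 assumed here).
    long blocksize = 256L << 20;                      // 256 MB
    System.out.println(((long) (blocksize * 0.5)) >> 20); // 128 (MB)
  }
}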
2024-12-09T15:37:36,903 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T15:37:36,907 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T15:37:36,907 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733597, jitterRate=-0.06718358397483826}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T15:37:36,908 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733758656887Initializing all the Stores at 1733758656889 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758656889Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758656891 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758656891Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758656891Cleaning up temporary data from old regions at 1733758656901 (+10 ms)Region opened successfully at 1733758656908 (+7 ms) 2024-12-09T15:37:36,910 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T15:37:36,914 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@727d30a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:37:36,915 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
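The split-policy summary a few records above (SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy with initialSize=268435456 and a jittered desiredMaxFileSize=733597) follows, as I read it, the usual increasing-to-upper-bound rule: a store triggers a split once it exceeds min(desiredMaxFileSize, initialSize x n^3), where n is the number of regions of the table on the server, and the logged jitterRate is already folded into desiredMaxFileSize. A hedged restatement of that rule; the method name and the n > 100 guard are illustrative, not HBase's actual code:

public class IncreasingSplitSize {
  static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionsOnServer) {
    if (regionsOnServer == 0 || regionsOnServer > 100) {
      return desiredMaxFileSize; // fall back to the plain upper bound
    }
    long cubed = initialSize * regionsOnServer * regionsOnServer * regionsOnServer;
    return Math.min(desiredMaxFileSize, cubed);
  }

  public static void main(String[] args) {
    // With the logged values, the tiny jittered upper bound dominates:
    System.out.println(sizeToCheck(268435456L, 733597L, 1)); // 733597
  }
}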
2024-12-09T15:37:36,915 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T15:37:36,915 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T15:37:36,915 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T15:37:36,916 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T15:37:36,916 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T15:37:36,916 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T15:37:36,921 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T15:37:36,922 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T15:37:36,924 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T15:37:36,924 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T15:37:36,925 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T15:37:36,927 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T15:37:36,928 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T15:37:36,929 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T15:37:36,930 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T15:37:36,931 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T15:37:36,933 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T15:37:36,936 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T15:37:36,937 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T15:37:36,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:37:36,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T15:37:36,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:36,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:36,941 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3651d8ead0f3,44363,1733758655731, sessionid=0x1012bd52b450000, setting cluster-up flag (Was=false) 2024-12-09T15:37:36,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:36,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:36,956 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T15:37:36,957 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,44363,1733758655731 2024-12-09T15:37:36,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:36,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T15:37:36,967 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T15:37:36,968 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3651d8ead0f3,44363,1733758655731 2024-12-09T15:37:36,970 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T15:37:36,972 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T15:37:36,972 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T15:37:36,973 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T15:37:36,973 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3651d8ead0f3,44363,1733758655731 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T15:37:36,978 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:37:36,978 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:37:36,978 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:37:36,978 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T15:37:36,978 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3651d8ead0f3:0, corePoolSize=10, maxPoolSize=10 2024-12-09T15:37:36,978 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:36,978 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:37:36,978 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:36,985 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(746): ClusterId : 546a3720-9822-4eba-a694-9e56674f0083 2024-12-09T15:37:36,986 DEBUG [RS:0;3651d8ead0f3:42475 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T15:37:36,990 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T15:37:36,990 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T15:37:36,993 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T15:37:36,994 DEBUG [RS:0;3651d8ead0f3:42475 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b05400f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3651d8ead0f3/172.17.0.3:0 2024-12-09T15:37:36,995 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T15:37:36,995 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T15:37:36,998 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733758686998 2024-12-09T15:37:36,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T15:37:36,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T15:37:36,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T15:37:36,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T15:37:36,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T15:37:36,999 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T15:37:37,000 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:37:37,000 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T15:37:37,007 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T15:37:37,008 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T15:37:37,008 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T15:37:37,008 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T15:37:37,017 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T15:37:37,017 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T15:37:37,018 DEBUG [RS:0;3651d8ead0f3:42475 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3651d8ead0f3:42475 2024-12-09T15:37:37,018 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T15:37:37,018 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T15:37:37,018 DEBUG [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(832): About to register with Master. 
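The StochasticLoadBalancer record further up lists its cost functions and reports "sum of multiplier of cost functions = 0.0" because no region load exists yet. The scoring those names suggest is a weighted sum: each function yields a cost scaled into [0,1], each is multiplied by its configured multiplier, and a candidate move is kept when it lowers the total. A sketch of that combination; the function names and weights below are illustrative placeholders, not this run's configuration:

import java.util.Map;

public class BalancerScore {
  // Multiplier-weighted sum over scaled costs in [0,1].
  static double total(Map<String, double[]> weightAndCost) {
    double sum = 0;
    for (double[] wc : weightAndCost.values()) {
      sum += wc[0] * wc[1];
    }
    return sum;
  }

  public static void main(String[] args) {
    double before = total(Map.of(
        "regionCountSkew", new double[] {500, 0.30},
        "moveCost", new double[] {7, 0.00}));
    double after = total(Map.of(
        "regionCountSkew", new double[] {500, 0.10},
        "moveCost", new double[] {7, 0.20}));
    System.out.println(after < before); // true: the move would be accepted
  }
}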
2024-12-09T15:37:37,018 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758657017,5,FailOnTimeoutGroup] 2024-12-09T15:37:37,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:37:37,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741831_1007 (size=1321) 2024-12-09T15:37:37,022 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(2659): reportForDuty to master=3651d8ead0f3,44363,1733758655731 with port=42475, startcode=1733758655845 2024-12-09T15:37:37,023 DEBUG [RS:0;3651d8ead0f3:42475 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T15:37:37,023 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758657018,5,FailOnTimeoutGroup] 2024-12-09T15:37:37,023 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T15:37:37,023 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T15:37:37,023 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T15:37:37,023 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T15:37:37,026 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T15:37:37,026 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e 2024-12-09T15:37:37,027 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41137, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T15:37:37,028 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44363 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3651d8ead0f3,42475,1733758655845 2024-12-09T15:37:37,028 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44363 {}] master.ServerManager(517): Registering regionserver=3651d8ead0f3,42475,1733758655845 2024-12-09T15:37:37,031 DEBUG [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e 2024-12-09T15:37:37,031 DEBUG [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42649 2024-12-09T15:37:37,031 DEBUG [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T15:37:37,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T15:37:37,034 DEBUG [RS:0;3651d8ead0f3:42475 {}] zookeeper.ZKUtil(111): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, 
baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3651d8ead0f3,42475,1733758655845 2024-12-09T15:37:37,034 WARN [RS:0;3651d8ead0f3:42475 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T15:37:37,035 INFO [RS:0;3651d8ead0f3:42475 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T15:37:37,035 DEBUG [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/3651d8ead0f3,42475,1733758655845 2024-12-09T15:37:37,035 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3651d8ead0f3,42475,1733758655845] 2024-12-09T15:37:37,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:37:37,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741832_1008 (size=32) 2024-12-09T15:37:37,045 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T15:37:37,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T15:37:37,050 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T15:37:37,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T15:37:37,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:37:37,053 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T15:37:37,054 INFO [RS:0;3651d8ead0f3:42475 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T15:37:37,054 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
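The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, consistent with the low mark being 95% of the global limit (0.95 is, to my knowledge, the default for hbase.regionserver.global.memstore.size.lower.limit; treat that property name as an assumption here). Flushing triggered above the global limit continues until usage falls back under the low mark. A quick check of the figures:

public class MemstoreMarks {
  public static void main(String[] args) {
    long globalLimitMb = 880;          // from the log line above
    double lowerLimitFraction = 0.95;  // assumed default lower-limit fraction
    long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);
    System.out.println(lowMarkMb + " M"); // 836 M, matching the log
  }
}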
2024-12-09T15:37:37,060 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T15:37:37,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T15:37:37,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T15:37:37,061 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T15:37:37,061 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T15:37:37,061 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3651d8ead0f3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3651d8ead0f3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SNAPSHOT_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:37:37,062 DEBUG [RS:0;3651d8ead0f3:42475 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3651d8ead0f3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T15:37:37,062 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T15:37:37,062 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T15:37:37,062 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:37:37,063 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T15:37:37,063 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T15:37:37,063 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T15:37:37,063 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T15:37:37,063 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,42475,1733758655845-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
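The CompactionConfiguration records repeated throughout this startup all carry the same selection parameters: between 3 and 10 files per compaction and a ratio of 1.2. In ratio-based selection as I understand it, a candidate set qualifies only if no single file is larger than ratio times the combined size of the others, which keeps one huge store file from being rewritten alongside a handful of tiny ones. A simplified sketch of that test, not ExploringCompactionPolicy's actual code:

public class RatioCheck {
  // True when no file exceeds ratio x (total size of the other files).
  static boolean filesInRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) {
      total += s;
    }
    for (long s : sizes) {
      if (s > (total - s) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(filesInRatio(new long[] {10, 12, 13}, 1.2)); // true
    System.out.println(filesInRatio(new long[] {100, 5, 5}, 1.2));  // false
  }
}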
2024-12-09T15:37:37,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:37:37,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T15:37:37,065 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T15:37:37,065 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:37:37,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:37:37,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T15:37:37,068 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T15:37:37,068 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:37:37,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:37:37,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-09T15:37:37,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740
2024-12-09T15:37:37,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740
2024-12-09T15:37:37,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-09T15:37:37,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-09T15:37:37,073 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-09T15:37:37,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-09T15:37:37,087 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T15:37:37,088 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-09T15:37:37,088 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853118, jitterRate=0.08479565382003784}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-09T15:37:37,088 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,42475,1733758655845-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,089 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,089 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.Replication(171): 3651d8ead0f3,42475,1733758655845 started
2024-12-09T15:37:37,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740:
    Writing region info on filesystem at 1733758657045
    Initializing all the Stores at 1733758657046 (+1 ms)
    Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758657047 (+1 ms)
    Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758657047
    Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758657047
    Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758657047
    Cleaning up temporary data from old regions at 1733758657073 (+26 ms)
    Region opened successfully at 1733758657089 (+16 ms)
2024-12-09T15:37:37,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T15:37:37,089 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T15:37:37,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T15:37:37,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T15:37:37,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T15:37:37,092 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T15:37:37,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740:
    Waiting for close lock at 1733758657089
    Disabling compacts and flushes for region at 1733758657089
    Disabling writes for close at 1733758657089
    Writing region close event to WAL at 1733758657092 (+3 ms)
    Closed at 1733758657092
2024-12-09T15:37:37,094 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T15:37:37,094 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-12-09T15:37:37,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-09T15:37:37,095 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T15:37:37,097 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-09T15:37:37,112 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,113 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(1482): Serving as 3651d8ead0f3,42475,1733758655845, RpcServer on 3651d8ead0f3/172.17.0.3:42475, sessionid=0x1012bd52b450001
2024-12-09T15:37:37,113 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-09T15:37:37,113 DEBUG [RS:0;3651d8ead0f3:42475 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3651d8ead0f3,42475,1733758655845
2024-12-09T15:37:37,113 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,42475,1733758655845'
2024-12-09T15:37:37,113 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-09T15:37:37,114 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-09T15:37:37,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:37:37,115 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-09T15:37:37,115 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-09T15:37:37,115 DEBUG [RS:0;3651d8ead0f3:42475 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3651d8ead0f3,42475,1733758655845
2024-12-09T15:37:37,115 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3651d8ead0f3,42475,1733758655845'
2024-12-09T15:37:37,115 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-09T15:37:37,115 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-09T15:37:37,116 DEBUG [RS:0;3651d8ead0f3:42475 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-09T15:37:37,116 INFO [RS:0;3651d8ead0f3:42475 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-09T15:37:37,116 INFO [RS:0;3651d8ead0f3:42475 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-09T15:37:37,218 INFO [RS:0;3651d8ead0f3:42475 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C42475%2C1733758655845, suffix=, logDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/3651d8ead0f3,42475,1733758655845, archiveDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/oldWALs, maxLogs=32
2024-12-09T15:37:37,219 INFO [RS:0;3651d8ead0f3:42475 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C42475%2C1733758655845.1733758657219
2024-12-09T15:37:37,232 INFO [RS:0;3651d8ead0f3:42475 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/3651d8ead0f3,42475,1733758655845/3651d8ead0f3%2C42475%2C1733758655845.1733758657219
2024-12-09T15:37:37,239 DEBUG [RS:0;3651d8ead0f3:42475 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38425:38425),(127.0.0.1/127.0.0.1:34845:34845)]
2024-12-09T15:37:37,247 DEBUG [3651d8ead0f3:44363 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-09T15:37:37,248 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3651d8ead0f3,42475,1733758655845
2024-12-09T15:37:37,249 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,42475,1733758655845, state=OPENING
2024-12-09T15:37:37,251 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-09T15:37:37,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:37:37,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:37:37,254 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:37:37,254 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:37:37,254 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T15:37:37,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,42475,1733758655845}]
2024-12-09T15:37:37,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:37:37,407 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-09T15:37:37,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39603, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-09T15:37:37,412 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-09T15:37:37,412 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T15:37:37,414 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3651d8ead0f3%2C42475%2C1733758655845.meta, suffix=.meta, logDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/3651d8ead0f3,42475,1733758655845, archiveDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/oldWALs, maxLogs=32
2024-12-09T15:37:37,414 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3651d8ead0f3%2C42475%2C1733758655845.meta.1733758657414.meta
2024-12-09T15:37:37,419 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/3651d8ead0f3,42475,1733758655845/3651d8ead0f3%2C42475%2C1733758655845.meta.1733758657414.meta
2024-12-09T15:37:37,423 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38425:38425),(127.0.0.1/127.0.0.1:34845:34845)]
2024-12-09T15:37:37,426 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-09T15:37:37,426 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-09T15:37:37,426 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-09T15:37:37,427 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-09T15:37:37,427 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-09T15:37:37,427 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T15:37:37,427 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-12-09T15:37:37,427 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-12-09T15:37:37,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T15:37:37,429 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T15:37:37,429 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:37:37,429 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:37:37,430 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T15:37:37,430 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-09T15:37:37,430 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:37:37,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:37:37,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T15:37:37,431 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T15:37:37,431 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:37:37,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:37:37,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T15:37:37,432 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T15:37:37,432 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T15:37:37,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T15:37:37,433 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-09T15:37:37,433 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740
2024-12-09T15:37:37,434 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740
2024-12-09T15:37:37,435 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-09T15:37:37,435 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-09T15:37:37,436 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-09T15:37:37,437 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-09T15:37:37,438 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830226, jitterRate=0.05568709969520569}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-09T15:37:37,438 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-12-09T15:37:37,438 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740:
    Running coprocessor pre-open hook at 1733758657427
    Writing region info on filesystem at 1733758657427
    Initializing all the Stores at 1733758657428 (+1 ms)
    Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758657428
    Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758657428
    Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733758657428
    Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733758657428
    Cleaning up temporary data from old regions at 1733758657435 (+7 ms)
    Running coprocessor post-open hooks at 1733758657438 (+3 ms)
    Region opened successfully at 1733758657438
2024-12-09T15:37:37,440 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733758657407
2024-12-09T15:37:37,442 DEBUG [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-09T15:37:37,442 INFO [RS_OPEN_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-12-09T15:37:37,443 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3651d8ead0f3,42475,1733758655845
2024-12-09T15:37:37,444 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3651d8ead0f3,42475,1733758655845, state=OPEN
2024-12-09T15:37:37,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-09T15:37:37,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-09T15:37:37,448 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3651d8ead0f3,42475,1733758655845
2024-12-09T15:37:37,448 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:37:37,448 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T15:37:37,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-12-09T15:37:37,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3651d8ead0f3,42475,1733758655845 in 194 msec
2024-12-09T15:37:37,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-12-09T15:37:37,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 357 msec
2024-12-09T15:37:37,454 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T15:37:37,454 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-12-09T15:37:37,456 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T15:37:37,456 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,42475,1733758655845, seqNum=-1]
2024-12-09T15:37:37,456 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T15:37:37,458 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38443, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T15:37:37,464 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 491 msec
2024-12-09T15:37:37,464 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733758657464, completionTime=-1
2024-12-09T15:37:37,464 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-09T15:37:37,464 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-12-09T15:37:37,466 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-12-09T15:37:37,466 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733758717466
2024-12-09T15:37:37,467 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733758777467
2024-12-09T15:37:37,467 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec
2024-12-09T15:37:37,467 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,44363,1733758655731-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,467 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,44363,1733758655731-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,467 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,44363,1733758655731-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,467 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3651d8ead0f3:44363, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,467 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,468 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,469 DEBUG [master/3651d8ead0f3:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-12-09T15:37:37,471 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.559sec
2024-12-09T15:37:37,471 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-09T15:37:37,471 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-09T15:37:37,471 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-09T15:37:37,471 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-09T15:37:37,471 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-09T15:37:37,471 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,44363,1733758655731-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T15:37:37,471 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,44363,1733758655731-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-09T15:37:37,474 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-12-09T15:37:37,474 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-09T15:37:37,474 INFO [master/3651d8ead0f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3651d8ead0f3,44363,1733758655731-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T15:37:37,484 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1df4b839, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:37:37,485 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3651d8ead0f3,44363,-1 for getting cluster id
2024-12-09T15:37:37,485 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-09T15:37:37,487 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '546a3720-9822-4eba-a694-9e56674f0083'
2024-12-09T15:37:37,487 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-09T15:37:37,487 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "546a3720-9822-4eba-a694-9e56674f0083"
2024-12-09T15:37:37,487 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a8b30a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:37:37,488 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3651d8ead0f3,44363,-1]
2024-12-09T15:37:37,488 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-09T15:37:37,488 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:37,489 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52000, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-09T15:37:37,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@499631cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T15:37:37,491 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T15:37:37,492 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3651d8ead0f3,42475,1733758655845, seqNum=-1]
2024-12-09T15:37:37,493 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T15:37:37,494 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51748, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T15:37:37,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3651d8ead0f3,44363,1733758655731
2024-12-09T15:37:37,496 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T15:37:37,499 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-12-09T15:37:37,499 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T15:37:37,503 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/oldWALs, maxLogs=32
2024-12-09T15:37:37,504 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733758657503
2024-12-09T15:37:37,510 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733758657503
2024-12-09T15:37:37,511 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34845:34845),(127.0.0.1/127.0.0.1:38425:38425)]
2024-12-09T15:37:37,514 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733758657514
2024-12-09T15:37:37,521 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,521 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,521 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,521 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,521 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,522 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733758657503 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733758657514
2024-12-09T15:37:37,523 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38425:38425),(127.0.0.1/127.0.0.1:34845:34845)]
2024-12-09T15:37:37,523 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733758657503 is not closed yet, will try archiving it next time
2024-12-09T15:37:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741835_1011 (size=93)
2024-12-09T15:37:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741835_1011 (size=93)
2024-12-09T15:37:37,525 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733758657503 to hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/oldWALs/test.com%2C8080%2C1.1733758657503
2024-12-09T15:37:37,525 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,525 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,526 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,526 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,527 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741836_1012 (size=93)
2024-12-09T15:37:37,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741836_1012 (size=93)
2024-12-09T15:37:37,533 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/oldWALs
2024-12-09T15:37:37,533 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733758657514)
2024-12-09T15:37:37,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T15:37:37,534 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T15:37:37,534 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:37:37,534 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:37,534 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:37,534 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T15:37:37,534 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=848198025, stopped=false
2024-12-09T15:37:37,534 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3651d8ead0f3,44363,1733758655731
2024-12-09T15:37:37,535 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T15:37:37,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:37:37,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:37:37,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T15:37:37,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:37:37,539 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:37:37,539 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T15:37:37,539 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:37:37,539 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:37,539 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3651d8ead0f3,42475,1733758655845' *****
2024-12-09T15:37:37,539 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T15:37:37,539 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:37:37,539 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T15:37:37,540 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(959): stopping server 3651d8ead0f3,42475,1733758655845
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3651d8ead0f3:42475.
2024-12-09T15:37:37,540 DEBUG [RS:0;3651d8ead0f3:42475 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T15:37:37,540 DEBUG [RS:0;3651d8ead0f3:42475 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T15:37:37,540 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-09T15:37:37,540 DEBUG [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-12-09T15:37:37,540 DEBUG [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T15:37:37,541 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T15:37:37,541 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T15:37:37,541 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T15:37:37,541 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T15:37:37,541 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T15:37:37,541 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB
2024-12-09T15:37:37,542 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T15:37:37,568 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740/.tmp/ns/18bd65f05657465698e0d2f9e7f76994 is 43, key is default/ns:d/1733758657458/Put/seqid=0
2024-12-09T15:37:37,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741837_1013 (size=5153)
2024-12-09T15:37:37,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741837_1013 (size=5153)
2024-12-09T15:37:37,578 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740/.tmp/ns/18bd65f05657465698e0d2f9e7f76994
2024-12-09T15:37:37,588 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740/.tmp/ns/18bd65f05657465698e0d2f9e7f76994 as hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740/ns/18bd65f05657465698e0d2f9e7f76994
2024-12-09T15:37:37,594 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740/ns/18bd65f05657465698e0d2f9e7f76994, entries=2, sequenceid=6, filesize=5.0 K
2024-12-09T15:37:37,595 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 54ms, sequenceid=6, compaction requested=false
2024-12-09T15:37:37,603 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-09T15:37:37,604 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T15:37:37,604 INFO [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T15:37:37,604 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740:
    Waiting for close lock at 1733758657541
    Running coprocessor pre-close hooks at 1733758657541
    Disabling compacts and flushes for region at 1733758657541
    Disabling writes for close at 1733758657541
    Obtaining lock to block concurrent updates at 1733758657541
    Preparing flush snapshotting stores in 1588230740 at 1733758657541
    Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733758657542 (+1 ms)
    Flushing stores of hbase:meta,,1.1588230740 at 1733758657543 (+1 ms)
    Flushing 1588230740/ns: creating writer at 1733758657543
    Flushing 1588230740/ns: appending metadata at 1733758657567 (+24 ms)
    Flushing 1588230740/ns: closing flushed file at 1733758657567
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@249c2b4f: reopening flushed file at 1733758657587 (+20 ms)
    Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 54ms, sequenceid=6, compaction requested=false at 1733758657595 (+8 ms)
    Writing region close event to WAL at 1733758657596 (+1 ms)
    Running coprocessor post-close hooks at 1733758657604 (+8 ms)
    Closed at 1733758657604
2024-12-09T15:37:37,604 DEBUG [RS_CLOSE_META-regionserver/3651d8ead0f3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T15:37:37,741 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(976): stopping server 3651d8ead0f3,42475,1733758655845; all regions closed.
2024-12-09T15:37:37,741 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,742 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,742 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,742 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,742 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741834_1010 (size=1152)
2024-12-09T15:37:37,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741834_1010 (size=1152)
2024-12-09T15:37:37,754 DEBUG [RS:0;3651d8ead0f3:42475 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/oldWALs
2024-12-09T15:37:37,754 INFO [RS:0;3651d8ead0f3:42475 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C42475%2C1733758655845.meta:.meta(num 1733758657414)
2024-12-09T15:37:37,755 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,756 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,758 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,758 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,759 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:37,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741833_1009 (size=93)
2024-12-09T15:37:37,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741833_1009 (size=93)
2024-12-09T15:37:37,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/MasterData/WALs/3651d8ead0f3,43929,1733758386761/3651d8ead0f3%2C43929%2C1733758386761.1733758387210
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T15:37:37,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.1733758387817
java.lang.reflect.InvocationTargetException: null
    (stack trace identical to the RecoverLeaseFSUtils trace above; Caused by: java.io.IOException: Filesystem closed ... 11 more)
2024-12-09T15:37:37,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
    (the same FsDatasetImpl metric-collection warning repeats at intervals from 15:37:37,966 through 15:37:38,004, identical message each time)
2024-12-09T15:37:38,063 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T15:37:38,063 INFO [regionserver/3651d8ead0f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T15:37:38,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,45133,1733758388518/3651d8ead0f3%2C45133%2C1733758388518.1733758388815
java.lang.reflect.InvocationTargetException: null
    (stack trace identical to the first RecoverLeaseFSUtils trace above; Caused by: java.io.IOException: Filesystem closed ... 11 more)
2024-12-09T15:37:38,185 DEBUG [RS:0;3651d8ead0f3:42475 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/oldWALs
2024-12-09T15:37:38,186 INFO [RS:0;3651d8ead0f3:42475 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3651d8ead0f3%2C42475%2C1733758655845:(num 1733758657219)
2024-12-09T15:37:38,186 DEBUG [RS:0;3651d8ead0f3:42475 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T15:37:38,186 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T15:37:38,186 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:37:38,186 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.ChoreService(370): Chore service for: regionserver/3651d8ead0f3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T15:37:38,186 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:37:38,186 INFO [regionserver/3651d8ead0f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T15:37:38,186 INFO [RS:0;3651d8ead0f3:42475 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42475
2024-12-09T15:37:38,192 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:37:38,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3651d8ead0f3,42475,1733758655845
2024-12-09T15:37:38,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T15:37:38,194 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3651d8ead0f3,42475,1733758655845]
2024-12-09T15:37:38,196 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3651d8ead0f3,42475,1733758655845 already deleted, retry=false
2024-12-09T15:37:38,196 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3651d8ead0f3,42475,1733758655845 expired; onlineServers=0
2024-12-09T15:37:38,196 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3651d8ead0f3,44363,1733758655731' *****
2024-12-09T15:37:38,196 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T15:37:38,196 INFO [M:0;3651d8ead0f3:44363 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T15:37:38,196 INFO [M:0;3651d8ead0f3:44363 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T15:37:38,196 DEBUG [M:0;3651d8ead0f3:44363 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T15:37:38,196 DEBUG [M:0;3651d8ead0f3:44363 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T15:37:38,196 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T15:37:38,196 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758657017 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.large.0-1733758657017,5,FailOnTimeoutGroup]
2024-12-09T15:37:38,196 DEBUG [master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758657018 {}] cleaner.HFileCleaner(306): Exit Thread[master/3651d8ead0f3:0:becomeActiveMaster-HFileCleaner.small.0-1733758657018,5,FailOnTimeoutGroup]
2024-12-09T15:37:38,196 INFO [M:0;3651d8ead0f3:44363 {}] hbase.ChoreService(370): Chore service for: master/3651d8ead0f3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T15:37:38,196 INFO [M:0;3651d8ead0f3:44363 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T15:37:38,196 DEBUG [M:0;3651d8ead0f3:44363 {}] master.HMaster(1795): Stopping service threads
2024-12-09T15:37:38,196 INFO [M:0;3651d8ead0f3:44363 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T15:37:38,196 INFO [M:0;3651d8ead0f3:44363 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T15:37:38,197 INFO [M:0;3651d8ead0f3:44363 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T15:37:38,197 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T15:37:38,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T15:37:38,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T15:37:38,200 DEBUG [M:0;3651d8ead0f3:44363 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false
2024-12-09T15:37:38,201 DEBUG [M:0;3651d8ead0f3:44363 {}] master.ActiveMasterManager(353): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master
2024-12-09T15:37:38,201 INFO [M:0;3651d8ead0f3:44363 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/.lastflushedseqids
2024-12-09T15:37:38,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741838_1014 (size=108)
2024-12-09T15:37:38,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741838_1014 (size=108)
2024-12-09T15:37:38,208 INFO [M:0;3651d8ead0f3:44363 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T15:37:38,208 INFO [M:0;3651d8ead0f3:44363 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T15:37:38,208 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T15:37:38,208 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:37:38,209 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:37:38,209 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T15:37:38,209 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:37:38,209 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-12-09T15:37:38,229 DEBUG [M:0;3651d8ead0f3:44363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/50ec150a89514bb6858ab34c4d4a8050 is 82, key is hbase:meta,,1/info:regioninfo/1733758657443/Put/seqid=0
2024-12-09T15:37:38,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741839_1015 (size=5672)
2024-12-09T15:37:38,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741839_1015 (size=5672)
2024-12-09T15:37:38,239 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/50ec150a89514bb6858ab34c4d4a8050
2024-12-09T15:37:38,266 DEBUG [M:0;3651d8ead0f3:44363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bc2f602b1c5d483fa719c2145d16736e is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733758657463/Put/seqid=0
2024-12-09T15:37:38,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741840_1016 (size=5275)
2024-12-09T15:37:38,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741840_1016 (size=5275)
2024-12-09T15:37:38,277 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bc2f602b1c5d483fa719c2145d16736e
2024-12-09T15:37:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:37:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42475-0x1012bd52b450001, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:37:38,294 INFO [RS:0;3651d8ead0f3:42475 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:37:38,294 INFO [RS:0;3651d8ead0f3:42475 {}] regionserver.HRegionServer(1031): Exiting; stopping=3651d8ead0f3,42475,1733758655845; zookeeper connection closed.
2024-12-09T15:37:38,300 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5b46ba50 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5b46ba50
2024-12-09T15:37:38,302 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T15:37:38,313 DEBUG [M:0;3651d8ead0f3:44363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b57b191f8aba4df8b7cb759ca661855f is 69, key is 3651d8ead0f3,42475,1733758655845/rs:state/1733758657028/Put/seqid=0
2024-12-09T15:37:38,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741841_1017 (size=5156)
2024-12-09T15:37:38,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741841_1017 (size=5156)
2024-12-09T15:37:38,325 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b57b191f8aba4df8b7cb759ca661855f
2024-12-09T15:37:38,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38871/user/jenkins/test-data/5460a68e-2c96-e864-6998-4735b2176647/WALs/3651d8ead0f3,34623,1733758386874/3651d8ead0f3%2C34623%2C1733758386874.meta.1733758388267.meta
java.lang.reflect.InvocationTargetException: null
    (stack trace identical to the first RecoverLeaseFSUtils trace above; Caused by: java.io.IOException: Filesystem closed ... 11 more)
2024-12-09T15:37:38,351 DEBUG [M:0;3651d8ead0f3:44363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/12761a8bc20543c3ae297bb6db1dc6d9 is 52, key is load_balancer_on/state:d/1733758657498/Put/seqid=0
2024-12-09T15:37:38,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741842_1018 (size=5056)
2024-12-09T15:37:38,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741842_1018 (size=5056)
2024-12-09T15:37:38,357 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/12761a8bc20543c3ae297bb6db1dc6d9
2024-12-09T15:37:38,362 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/50ec150a89514bb6858ab34c4d4a8050 as hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/50ec150a89514bb6858ab34c4d4a8050
2024-12-09T15:37:38,367 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/50ec150a89514bb6858ab34c4d4a8050, entries=8, sequenceid=29, filesize=5.5 K
2024-12-09T15:37:38,368 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bc2f602b1c5d483fa719c2145d16736e as hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bc2f602b1c5d483fa719c2145d16736e
2024-12-09T15:37:38,373 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bc2f602b1c5d483fa719c2145d16736e, entries=3, sequenceid=29, filesize=5.2 K
2024-12-09T15:37:38,374 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b57b191f8aba4df8b7cb759ca661855f as hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b57b191f8aba4df8b7cb759ca661855f
2024-12-09T15:37:38,379 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b57b191f8aba4df8b7cb759ca661855f, entries=1, sequenceid=29, filesize=5.0 K
2024-12-09T15:37:38,380 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/12761a8bc20543c3ae297bb6db1dc6d9 as hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/12761a8bc20543c3ae297bb6db1dc6d9
2024-12-09T15:37:38,394 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/f4972bcc-07b4-fc18-85d1-4a8324177d4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/12761a8bc20543c3ae297bb6db1dc6d9, entries=1, sequenceid=29, filesize=4.9 K
2024-12-09T15:37:38,396 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=29, compaction requested=false
2024-12-09T15:37:38,397 INFO [M:0;3651d8ead0f3:44363 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T15:37:38,397 DEBUG [M:0;3651d8ead0f3:44363 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1733758658208
    Disabling compacts and flushes for region at 1733758658208
    Disabling writes for close at 1733758658209 (+1 ms)
    Obtaining lock to block concurrent updates at 1733758658209
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733758658209
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733758658209
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733758658210 (+1 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733758658210
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733758658228 (+18 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733758658228
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733758658244 (+16 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733758658265 (+21 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733758658265
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733758658290 (+25 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733758658312 (+22 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733758658312
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733758658330 (+18 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733758658351 (+21 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733758658351
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@168e28e7: reopening flushed file at 1733758658361 (+10 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6848dadf: reopening flushed file at 1733758658368 (+7 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@650dc3c4: reopening flushed file at 1733758658373 (+5 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@767e5b67: reopening flushed file at 1733758658379 (+6 ms)
    Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=29, compaction requested=false at 1733758658396 (+17 ms)
    Writing region close event to WAL at 1733758658397 (+1 ms)
    Closed at 1733758658397
2024-12-09T15:37:38,398 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:38,398 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:38,398 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:38,398 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:38,398 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T15:37:38,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741830_1006 (size=10311)
2024-12-09T15:37:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46335 is added to blk_1073741830_1006 (size=10311)
2024-12-09T15:37:38,402 INFO [M:0;3651d8ead0f3:44363 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T15:37:38,402 INFO [M:0;3651d8ead0f3:44363 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44363
2024-12-09T15:37:38,402 INFO [M:0;3651d8ead0f3:44363 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T15:37:38,402 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T15:37:38,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:37:38,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x1012bd52b450000, quorum=127.0.0.1:55641, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T15:37:38,504 INFO [M:0;3651d8ead0f3:44363 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T15:37:38,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@616bab4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:37:38,508 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2269c58e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:37:38,508 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:37:38,508 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59532081{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:37:38,508 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20dd8a9c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/hadoop.log.dir/,STOPPED}
2024-12-09T15:37:38,510 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:37:38,510 WARN [BP-293280157-172.17.0.3-1733758654087 heartbeating to localhost/127.0.0.1:42649 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:37:38,510 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:37:38,510 WARN [BP-293280157-172.17.0.3-1733758654087 heartbeating to localhost/127.0.0.1:42649 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-293280157-172.17.0.3-1733758654087 (Datanode Uuid 4bc40f71-c804-4fd1-9979-000ddf434d51) service to localhost/127.0.0.1:42649
2024-12-09T15:37:38,511 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/data/data3/current/BP-293280157-172.17.0.3-1733758654087 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:37:38,511 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/data/data4/current/BP-293280157-172.17.0.3-1733758654087 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:37:38,511 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:37:38,513 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T15:37:38,513 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ab56e91{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T15:37:38,514 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5eefcd14{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:37:38,514 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:37:38,514 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d45b9b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:37:38,514 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ec9da7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/hadoop.log.dir/,STOPPED}
2024-12-09T15:37:38,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
    (the same FsDatasetImpl metric-collection warning repeats at intervals from 15:37:38,515 through 15:37:38,557, identical message each time)
2024-12-09T15:37:38,563 WARN [BP-293280157-172.17.0.3-1733758654087 heartbeating to localhost/127.0.0.1:42649 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T15:37:38,563 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T15:37:38,563 WARN [BP-293280157-172.17.0.3-1733758654087 heartbeating to localhost/127.0.0.1:42649 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-293280157-172.17.0.3-1733758654087 (Datanode Uuid 37c55293-3aa0-4b87-9908-7499147af738) service to localhost/127.0.0.1:42649
2024-12-09T15:37:38,563 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T15:37:38,564 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/data/data1/current/BP-293280157-172.17.0.3-1733758654087 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:37:38,564 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/cluster_0c615871-8822-9326-6903-0eada1624e76/data/data2/current/BP-293280157-172.17.0.3-1733758654087 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T15:37:38,564 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T15:37:38,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20339ff2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T15:37:38,571 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@238bf9b3{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T15:37:38,571 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T15:37:38,571 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a47ab0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T15:37:38,571 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61266769{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1cc2488-b701-8b5f-578f-cdc71f0cd3d1/hadoop.log.dir/,STOPPED}
2024-12-09T15:37:38,580 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T15:37:38,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T15:37:38,609 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=271 (was 232)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:42649
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42649 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    (same epollWait stack as MiniHBaseClusterRegionServer-EventLoopGroup-17-3 above)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42649
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    (same epollWait stack as MiniHBaseClusterRegionServer-EventLoopGroup-17-3 above)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    (same epollWait stack as MiniHBaseClusterRegionServer-EventLoopGroup-17-3 above)
Potentially hanging thread: nioEventLoopGroup-44-2
    (same confirmShutdown stack as nioEventLoopGroup-44-3 above)
Potentially hanging thread: nioEventLoopGroup-43-3
    (same confirmShutdown stack as nioEventLoopGroup-44-3 above)
Potentially hanging
thread: IPC Client (75657370) connection to localhost/127.0.0.1:42649 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42649 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42649 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42649 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42649 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=532 (was 506) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=534 (was 534), ProcessCount=11 (was 11), AvailableMemoryMB=4884 (was 5366)
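Note: the summary above is produced by diffing per-resource counts captured before and after the test; threads that survive the test are then dumped as "potentially hanging". Below is a minimal sketch of that before/after accounting, assuming a hypothetical SimpleResourceChecker rather than HBase's actual ResourceChecker; the leak criterion and output layout here are assumptions for illustration.

    import java.util.Map;

    // Illustrative sketch of the accounting behind lines like
    // "Thread=271 (was 232) ... - Thread LEAK? -".
    public class SimpleResourceChecker {
      private int before;

      public void beforeTest() {
        // Estimate of live threads in the current thread group (good enough for a diff).
        before = Thread.activeCount();
      }

      public void afterTest(String testName) {
        int after = Thread.activeCount();
        StringBuilder sb = new StringBuilder()
            .append("after: ").append(testName)
            .append(" Thread=").append(after)
            .append(" (was ").append(before).append(")");
        if (after > before) {
          // Dump surviving threads so the log shows what is "potentially hanging".
          for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
            sb.append("\nPotentially hanging thread: ").append(e.getKey().getName());
            for (StackTraceElement frame : e.getValue()) {
              sb.append("\n ").append(frame);
            }
          }
          sb.append(" - Thread LEAK? -");
        }
        System.out.println(sb);
      }
    }

In this run the count rose from 232 to 271, so the dump fires; the surviving threads are mostly Netty event loops, HDFS lease renewers, and Hadoop IPC threads that are still winding down after the minicluster stops, which is why the report hedges with "LEAK?" rather than failing the test outright.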